# test_main

Run all acceptance tests available in the `input_yaml_files` folder.

## test_process_input(input_filename)

Execute an acceptance test in a separate Python subprocess, once for each input YAML file available.
Parameters:

| Name | Type | Description | Default |
|---|---|---|---|
| `input_filename` | `str` | name of input file | required |
Source code in tests_acceptance/test_main.py
@pytest.mark.parametrize("input_filename", input_yaml_filenames)
def test_process_input(input_filename):
    """Execute acceptance test using a python subprocess
    using all input yaml files available

    Runs the main script on one input yaml file, then compares every
    generated NetCDF file against its reference counterpart.

    Args:
        input_filename (str): name of input file
    """
    input_file_path = input_yaml_files_path / input_filename
    # Build the subprocess command
    command = [sys.executable, str(main_script_path), str(input_file_path)]
    # Run the script in a separate Python process. subprocess.run() with
    # capture_output replaces the older Popen/communicate() pair.
    process = subprocess.run(command, capture_output=True, text=True)
    # do not use stdout ;-)
    if MAIN_SCRIPT_NAME != "":
        print(process.stdout)
    # Check the exit code
    assert (
        process.returncode == 0
    ), f"Script {main_script_path} failed for {input_filename}\n{process.stderr}"
    # Read the configured output filename back from the input yaml so we
    # know which generated files to compare.
    with open(input_file_path, "r") as f:
        data = yaml.load(f, Loader=SafeLoaderIgnoreUnknown)
    output_filename = Path(data["output-data"]["filename"])
    # Build a glob pattern: keep an explicit wildcard as-is; otherwise match
    # any suffix of the stem (the run may have produced several output files).
    if "*" in output_filename.name:
        outputname = output_filename.name
    else:
        outputname = output_filename.stem + "*"
    filenames_list = list(reference_files_path.glob(outputname))
    assert len(filenames_list) > 0, f"No output files generated for {input_filename}"
    for filename in filenames_list:
        # Open both datasets as context managers so the NetCDF file handles
        # are released even when the equality assertion fails (the original
        # leaked both handles on every path).
        with _xr.open_dataset(
            output_filename.parent / filename.name, engine="netcdf4"
        ) as generated_nc, _xr.open_dataset(filename, engine="netcdf4") as reference_nc:
            # Compare the datasets if they have matching variables and coordinates
            assert generated_nc.equals(
                reference_nc
            ), f"Generated output does not match reference for {input_filename}"