diff --git a/src/_ert/forward_model_runner/forward_model_step.py b/src/_ert/forward_model_runner/forward_model_step.py index 0249bafe5b1..0f4484fb06c 100644 --- a/src/_ert/forward_model_runner/forward_model_step.py +++ b/src/_ert/forward_model_runner/forward_model_step.py @@ -452,9 +452,12 @@ def _get_processtree_data( oom_score = int( Path(f"/proc/{process.pid}/oom_score").read_text(encoding="utf-8") ) - with contextlib.suppress( - ValueError, NoSuchProcess, AccessDenied, ZombieProcess, ProcessLookupError - ), process.oneshot(): + with ( + contextlib.suppress( + ValueError, NoSuchProcess, AccessDenied, ZombieProcess, ProcessLookupError + ), + process.oneshot(), + ): memory_rss = process.memory_info().rss cpu_seconds = process.cpu_times().user @@ -478,9 +481,10 @@ def _get_processtree_data( if oom_score is not None else oom_score_child ) - with contextlib.suppress( - NoSuchProcess, AccessDenied, ZombieProcess - ), child.oneshot(): + with ( + contextlib.suppress(NoSuchProcess, AccessDenied, ZombieProcess), + child.oneshot(), + ): memory_rss += child.memory_info().rss cpu_seconds += child.cpu_times().user return (memory_rss, cpu_seconds, oom_score) diff --git a/src/ert/analysis/_es_update.py b/src/ert/analysis/_es_update.py index 8915fc02f0e..46d32eed69a 100644 --- a/src/ert/analysis/_es_update.py +++ b/src/ert/analysis/_es_update.py @@ -376,6 +376,7 @@ def _load_observations_and_responses( ens_mean_mask, ens_std_mask, indexes, + strict=False, ): update_snapshot.append( ObservationAndResponseSnapshot( diff --git a/src/ert/config/design_matrix.py b/src/ert/config/design_matrix.py index 79297215c98..f866766e41c 100644 --- a/src/ert/config/design_matrix.py +++ b/src/ert/config/design_matrix.py @@ -192,7 +192,7 @@ def _validate_design_matrix(design_matrix: pd.DataFrame) -> List[str]: errors.append("Duplicate parameter names found in design sheet") empties = [ f"Realization {design_matrix.index[i]}, column {design_matrix.columns[j]}" - for i, j in zip(*np.where(pd.isna(design_matrix))) + for i, j in zip(*np.where(pd.isna(design_matrix)), strict=False) ] if len(empties) > 0: errors.append(f"Design matrix contains empty cells {empties}") @@ -225,7 +225,7 @@ def _read_defaultssheet( raise ValueError("Defaults sheet must have at least two columns") empty_cells = [ f"Row {default_df.index[i]}, column {default_df.columns[j]}" - for i, j in zip(*np.where(pd.isna(default_df))) + for i, j in zip(*np.where(pd.isna(default_df)), strict=False) ] if len(empty_cells) > 0: raise ValueError(f"Default sheet contains empty cells {empty_cells}") diff --git a/src/ert/config/gen_data_config.py b/src/ert/config/gen_data_config.py index 9e90c2b0b51..1a1cc736577 100644 --- a/src/ert/config/gen_data_config.py +++ b/src/ert/config/gen_data_config.py @@ -34,7 +34,9 @@ def __post_init__(self) -> None: @property def expected_input_files(self) -> List[str]: expected_files = [] - for input_file, report_steps in zip(self.input_files, self.report_steps_list): + for input_file, report_steps in zip( + self.input_files, self.report_steps_list, strict=False + ): if report_steps is None: expected_files.append(input_file) else: @@ -144,7 +146,7 @@ def _read_file(filename: Path, report_step: int) -> polars.DataFrame: datasets_per_name = [] for name, input_file, report_steps in zip( - self.keys, self.input_files, self.report_steps_list + self.keys, self.input_files, self.report_steps_list, strict=False ): datasets_per_report_step = [] if report_steps is None: diff --git a/src/ert/config/gen_kw_config.py b/src/ert/config/gen_kw_config.py 
index eff157e6a1a..c99438fdd65 100644 --- a/src/ert/config/gen_kw_config.py +++ b/src/ert/config/gen_kw_config.py @@ -293,7 +293,9 @@ def write_to_runpath( f" is of size {len(self.transform_functions)}, expected {array.size}" ) - data = dict(zip(array["names"].values.tolist(), array.values.tolist())) + data = dict( + zip(array["names"].values.tolist(), array.values.tolist(), strict=False) + ) log10_data = { tf.name: math.log(data[tf.name], 10) @@ -477,7 +479,7 @@ def _parse_transform_function_definition( f"Unable to convert float number: {p}" ) from e - params = dict(zip(param_names, param_floats)) + params = dict(zip(param_names, param_floats, strict=False)) return TransformFunction( name=t.name, diff --git a/src/ert/config/observations.py b/src/ert/config/observations.py index 9fec489699f..b89d1f2fbc2 100644 --- a/src/ert/config/observations.py +++ b/src/ert/config/observations.py @@ -177,7 +177,7 @@ def _handle_history_observation( segment_instance, ) data: Dict[Union[int, datetime], Union[GenObservation, SummaryObservation]] = {} - for date, error, value in zip(refcase.dates, std_dev, values): + for date, error, value in zip(refcase.dates, std_dev, values, strict=False): data[date] = SummaryObservation(summary_key, summary_key, value, error) return { diff --git a/src/ert/dark_storage/common.py b/src/ert/dark_storage/common.py index c6871c3e452..c0ef8444009 100644 --- a/src/ert/dark_storage/common.py +++ b/src/ert/dark_storage/common.py @@ -79,7 +79,7 @@ def gen_data_keys(ensemble: Ensemble) -> Iterator[str]: if gen_data_config: assert isinstance(gen_data_config, GenDataConfig) for key, report_steps in zip( - gen_data_config.keys, gen_data_config.report_steps_list + gen_data_config.keys, gen_data_config.report_steps_list, strict=False ): if report_steps is None: yield f"{key}@0" diff --git a/src/ert/ensemble_evaluator/_wait_for_evaluator.py b/src/ert/ensemble_evaluator/_wait_for_evaluator.py index 6bb55adda10..9b5f5591292 100644 --- a/src/ert/ensemble_evaluator/_wait_for_evaluator.py +++ b/src/ert/ensemble_evaluator/_wait_for_evaluator.py @@ -27,13 +27,16 @@ async def attempt_connection( ) -> None: timeout = aiohttp.ClientTimeout(connect=connection_timeout) headers = {} if token is None else {"token": token} - async with aiohttp.ClientSession() as session, session.request( - method="get", - url=url, - ssl=get_ssl_context(cert), - headers=headers, - timeout=timeout, - ) as resp: + async with ( + aiohttp.ClientSession() as session, + session.request( + method="get", + url=url, + ssl=get_ssl_context(cert), + headers=headers, + timeout=timeout, + ) as resp, + ): resp.raise_for_status() diff --git a/src/ert/gui/tools/plot/customize/style_chooser.py b/src/ert/gui/tools/plot/customize/style_chooser.py index e8075b653fe..93cd10dedf6 100644 --- a/src/ert/gui/tools/plot/customize/style_chooser.py +++ b/src/ert/gui/tools/plot/customize/style_chooser.py @@ -199,7 +199,7 @@ def createLabelLayout(self, layout: Optional[QLayout] = None) -> QLayout: titles = ["Line style", "Width", "Marker style", "Size"] sizes = self.getItemSizes() - for title, size in zip(titles, sizes): + for title, size in zip(titles, sizes, strict=False): label = QLabel(title) label.setFixedWidth(size) layout.addWidget(label) diff --git a/src/ert/resources/forward_models/res/script/ecl_run.py b/src/ert/resources/forward_models/res/script/ecl_run.py index e1eb420e12d..e29ea7a9dd3 100644 --- a/src/ert/resources/forward_models/res/script/ecl_run.py +++ b/src/ert/resources/forward_models/res/script/ecl_run.py @@ -137,7 +137,7 @@ def 
make_SLURM_machine_list(SLURM_JOB_NODELIST, SLURM_TASKS_PER_NODE): task_count_list += _expand_SLURM_task_count(task_count_string) host_list = [] - for node, count in zip(nodelist, task_count_list): + for node, count in zip(nodelist, task_count_list, strict=False): host_list += [node] * count return host_list diff --git a/src/ert/run_arg.py b/src/ert/run_arg.py index 9116b263bcc..9be93670e8b 100644 --- a/src/ert/run_arg.py +++ b/src/ert/run_arg.py @@ -37,7 +37,7 @@ def create_run_arguments( job_names = runpaths.get_jobnames(range(len(active_realizations)), iteration) for iens, (run_path, job_name, active) in enumerate( - zip(paths, job_names, active_realizations) + zip(paths, job_names, active_realizations, strict=False) ): run_args.append( RunArg( diff --git a/src/ert/run_models/base_run_model.py b/src/ert/run_models/base_run_model.py index b000d5fa491..53bc3a9fa9b 100644 --- a/src/ert/run_models/base_run_model.py +++ b/src/ert/run_models/base_run_model.py @@ -288,7 +288,9 @@ def _create_mask_from_failed_realizations(self) -> List[bool]: return [ initial and not completed for initial, completed in zip( - self._initial_realizations_mask, self._completed_realizations_mask + self._initial_realizations_mask, + self._completed_realizations_mask, + strict=False, ) ] else: diff --git a/src/ert/simulator/forward_model_status.py b/src/ert/simulator/forward_model_status.py index abb9be03904..4305a1b1711 100644 --- a/src/ert/simulator/forward_model_status.py +++ b/src/ert/simulator/forward_model_status.py @@ -118,7 +118,9 @@ def try_load(cls, path: str) -> "ForwardModelStatus": end_time = _deserialize_date(status_data["end_time"]) status = cls(status_data["run_id"], start_time, end_time=end_time) - for fm_step, state in zip(fm_steps_data["jobList"], status_data["jobs"]): + for fm_step, state in zip( + fm_steps_data["jobList"], status_data["jobs"], strict=False + ): status.add_step(ForwardModelStepStatus.load(fm_step, state, path)) return status diff --git a/src/everest/bin/main.py b/src/everest/bin/main.py index 553a20a1654..604d7567aed 100644 --- a/src/everest/bin/main.py +++ b/src/everest/bin/main.py @@ -103,7 +103,9 @@ def methods_help(cls): pubmets.remove("methods_help") # Current method should not show up in desc maxlen = max(len(m) for m in pubmets) docstrs = [getattr(cls, m).__doc__ for m in pubmets] - doclist = [m.ljust(maxlen + 1) + d for m, d in zip(pubmets, docstrs)] + doclist = [ + m.ljust(maxlen + 1) + d for m, d in zip(pubmets, docstrs, strict=False) + ] return "\n".join(doclist) def run(self, args): diff --git a/src/everest/bin/utils.py b/src/everest/bin/utils.py index 6a21700783e..188da9cd6c0 100644 --- a/src/everest/bin/utils.py +++ b/src/everest/bin/utils.py @@ -232,7 +232,7 @@ def _get_progress_summary(status): labels = ("Waiting", "Pending", "Running", "Complete", "FAILED") return " | ".join( f"{color}{key}: {value}{Fore.RESET}" - for color, key, value in zip(colors, labels, status) + for color, key, value in zip(colors, labels, status, strict=False) ) @classmethod diff --git a/src/everest/jobs/well_tools/__init__.py b/src/everest/jobs/well_tools/__init__.py index 214572bc8b1..6a93f6445bb 100644 --- a/src/everest/jobs/well_tools/__init__.py +++ b/src/everest/jobs/well_tools/__init__.py @@ -65,7 +65,7 @@ def well_set(well_data_file, new_entry_file, output_file): ) raise ValueError(err_msg) - for well_entry, data_elem in zip(well_data, entry_data): + for well_entry, data_elem in zip(well_data, entry_data, strict=False): well_entry[entry_key] = data_elem with 
everest.jobs.io.safe_open(output_file, "w") as fout: diff --git a/src/everest/simulator/everest_to_ert.py b/src/everest/simulator/everest_to_ert.py index d4dce80ab7f..5151172d3c2 100644 --- a/src/everest/simulator/everest_to_ert.py +++ b/src/everest/simulator/everest_to_ert.py @@ -538,12 +538,10 @@ def _get_variables( # configuration key. When initializing an ERT config object, it is ignored. # It is used by the Simulator object to inject ExtParamConfig nodes. for control in ever_config.controls or []: - ens_config.parameter_configs[control.name] = ( - ExtParamConfig( - name=control.name, - input_keys=_get_variables(control.variables), - output_file=control.name + ".json", - ) + ens_config.parameter_configs[control.name] = ExtParamConfig( + name=control.name, + input_keys=_get_variables(control.variables), + output_file=control.name + ".json", ) return ert_config diff --git a/src/everest/simulator/simulator.py b/src/everest/simulator/simulator.py index d66ca5d00b2..3e60f533460 100644 --- a/src/everest/simulator/simulator.py +++ b/src/everest/simulator/simulator.py @@ -133,6 +133,7 @@ def _run_forward_model( for control_name, control_value in zip( metadata.config.variables.names, # type: ignore control_values[sim_idx, :], + strict=False, ): self._add_control(controls, control_name, control_value) case_data.append((real_id, controls)) diff --git a/test-data/ert/snake_oil/forward_models/snake_oil_diff.py b/test-data/ert/snake_oil/forward_models/snake_oil_diff.py index fabd8ffded5..a08fcf2be3f 100755 --- a/test-data/ert/snake_oil/forward_models/snake_oil_diff.py +++ b/test-data/ert/snake_oil/forward_models/snake_oil_diff.py @@ -4,7 +4,7 @@ def writeDiff(filename, vector1, vector2): with open(filename, "w", encoding="utf-8") as f: - for node1, node2 in zip(vector1, vector2): + for node1, node2 in zip(vector1, vector2, strict=False): f.write(f"{node1-node2:f}\n") diff --git a/test-data/everest/math_func/jobs/adv_distance3.py b/test-data/everest/math_func/jobs/adv_distance3.py index 4749d58c7e0..200cec442a9 100755 --- a/test-data/everest/math_func/jobs/adv_distance3.py +++ b/test-data/everest/math_func/jobs/adv_distance3.py @@ -6,7 +6,7 @@ def compute_distance_squared(p, q): - d = ((i - j) ** 2 for i, j in zip(p, q)) + d = ((i - j) ** 2 for i, j in zip(p, q, strict=False)) d = sum(d) return -d diff --git a/test-data/everest/math_func/jobs/distance3.py b/test-data/everest/math_func/jobs/distance3.py index 8eeb14b123d..bdfb8f94a3d 100755 --- a/test-data/everest/math_func/jobs/distance3.py +++ b/test-data/everest/math_func/jobs/distance3.py @@ -6,7 +6,7 @@ def compute_distance_squared(p, q): - d = ((i - j) ** 2 for i, j in zip(p, q)) + d = ((i - j) ** 2 for i, j in zip(p, q, strict=False)) d = sum(d) return -d diff --git a/tests/ert/ui_tests/cli/test_cli.py b/tests/ert/ui_tests/cli/test_cli.py index 5ebb9e8d63b..ba070c3acf7 100644 --- a/tests/ert/ui_tests/cli/test_cli.py +++ b/tests/ert/ui_tests/cli/test_cli.py @@ -64,9 +64,10 @@ def test_test_run_on_lsf_configuration_works_with_no_errors(tmp_path): ) @pytest.mark.usefixtures("copy_poly_case") def test_that_the_cli_raises_exceptions_when_parameters_are_missing(mode): - with open("poly.ert", "r", encoding="utf-8") as fin, open( - "poly-no-gen-kw.ert", "w", encoding="utf-8" - ) as fout: + with ( + open("poly.ert", "r", encoding="utf-8") as fin, + open("poly-no-gen-kw.ert", "w", encoding="utf-8") as fout, + ): for line in fin: if "GEN_KW" not in line: fout.write(line) @@ -185,9 +186,10 @@ def 
test_that_the_model_raises_exception_if_active_less_than_minimum_realization Omit testing of SingleTestRun because that executes with 1 active realization regardless of configuration. """ - with open("poly.ert", "r", encoding="utf-8") as fin, open( - "poly_high_min_reals.ert", "w", encoding="utf-8" - ) as fout: + with ( + open("poly.ert", "r", encoding="utf-8") as fin, + open("poly_high_min_reals.ert", "w", encoding="utf-8") as fout, + ): for line in fin: if "MIN_REALIZATIONS" in line: fout.write("MIN_REALIZATIONS 100") @@ -215,9 +217,10 @@ def test_that_the_model_warns_when_active_realizations_less_min_realizations(): NUM_REALIZATIONS when running ensemble_experiment. A warning is issued when NUM_REALIZATIONS is higher than active_realizations. """ - with open("poly.ert", "r", encoding="utf-8") as fin, open( - "poly_lower_active_reals.ert", "w", encoding="utf-8" - ) as fout: + with ( + open("poly.ert", "r", encoding="utf-8") as fin, + open("poly_lower_active_reals.ert", "w", encoding="utf-8") as fout, + ): for line in fin: if "MIN_REALIZATIONS" in line: fout.write("MIN_REALIZATIONS 100") @@ -864,9 +867,10 @@ def test_that_log_is_cleaned_up_from_repeated_forward_model_steps(caplog): """Verify that the run model now gereneates a cleanup log when there are repeated forward models """ - with open("poly.ert", "r", encoding="utf-8") as fin, open( - "poly_repeated_forward_model_steps.ert", "w", encoding="utf-8" - ) as fout: + with ( + open("poly.ert", "r", encoding="utf-8") as fin, + open("poly_repeated_forward_model_steps.ert", "w", encoding="utf-8") as fout, + ): forward_model_steps = ["FORWARD_MODEL poly_eval\n"] * 5 lines = fin.readlines() + forward_model_steps fout.writelines(lines) diff --git a/tests/ert/ui_tests/cli/test_missing_runpath.py b/tests/ert/ui_tests/cli/test_missing_runpath.py index 08227bb96ab..34257da4fc5 100644 --- a/tests/ert/ui_tests/cli/test_missing_runpath.py +++ b/tests/ert/ui_tests/cli/test_missing_runpath.py @@ -94,13 +94,15 @@ def test_failing_writes_lead_to_isolated_failures(tmp_path, monkeypatch, pytestc NUM_REALIZATIONS 10 """ ) - with pytest.raises( - ErtCliError, - match=r"(?s)active realizations \(9\) is less than .* MIN_REALIZATIONS\(10\).*" - r"Driver reported: Could not create submit script: Don't like realization-1", - ), patch_raising_named_temporary_file( - queue_system.lower() - ), ErtPluginContext() as context: + with ( + pytest.raises( + ErtCliError, + match=r"(?s)active realizations \(9\) is less than .* MIN_REALIZATIONS\(10\).*" + r"Driver reported: Could not create submit script: Don't like realization-1", + ), + patch_raising_named_temporary_file(queue_system.lower()), + ErtPluginContext() as context, + ): run_cli_with_pm( ["ensemble_experiment", "config.ert", "--disable-monitoring"], pm=context.plugin_manager, diff --git a/tests/ert/ui_tests/gui/conftest.py b/tests/ert/ui_tests/gui/conftest.py index 1c0b1421566..56e79b953c7 100644 --- a/tests/ert/ui_tests/gui/conftest.py +++ b/tests/ert/ui_tests/gui/conftest.py @@ -44,12 +44,15 @@ def open_gui_with_config(config_path) -> Iterator[ErtMainWindow]: - with _open_main_window(config_path) as ( - gui, - _, - config, - ), StorageService.init_service( - project=os.path.abspath(config.ens_path), + with ( + _open_main_window(config_path) as ( + gui, + _, + config, + ), + StorageService.init_service( + project=os.path.abspath(config.ens_path), + ), ): yield gui @@ -88,9 +91,10 @@ def _open_main_window( args_mock.config = str(path) with ErtPluginContext(): config = ErtConfig.with_plugins().from_file(path) - 
with open_storage( - config.ens_path, mode="w" - ) as storage, add_gui_log_handler() as log_handler: + with ( + open_storage(config.ens_path, mode="w") as storage, + add_gui_log_handler() as log_handler, + ): gui = _setup_main_window(config, args_mock, log_handler, storage) yield gui, storage, config gui.close() @@ -107,10 +111,13 @@ def opened_main_window_minimal_realizations(source_root, tmp_path, monkeypatch): def _esmda_run(run_experiment, source_root, tmp_path_factory): path = tmp_path_factory.mktemp("test-data") _new_poly_example(source_root, path) - with pytest.MonkeyPatch.context() as mp, _open_main_window(path / "poly.ert") as ( - gui, - _, - config, + with ( + pytest.MonkeyPatch.context() as mp, + _open_main_window(path / "poly.ert") as ( + gui, + _, + config, + ), ): mp.chdir(path) run_experiment(MultipleDataAssimilation, gui) @@ -128,10 +135,13 @@ def _ensemble_experiment_run( ): path = tmp_path_factory.mktemp("test-data") _new_poly_example(source_root, path) - with pytest.MonkeyPatch.context() as mp, _open_main_window(path / "poly.ert") as ( - gui, - _, - _, + with ( + pytest.MonkeyPatch.context() as mp, + _open_main_window(path / "poly.ert") as ( + gui, + _, + _, + ), ): mp.chdir(path) if failing_reals: @@ -177,12 +187,15 @@ def _evaluate(coeffs, x): def esmda_has_run(_esmda_run, tmp_path, monkeypatch): monkeypatch.chdir(tmp_path) shutil.copytree(_esmda_run, tmp_path, dirs_exist_ok=True) - with _open_main_window(tmp_path / "poly.ert") as ( - gui, - _, - config, - ), StorageService.init_service( - project=os.path.abspath(config.ens_path), + with ( + _open_main_window(tmp_path / "poly.ert") as ( + gui, + _, + config, + ), + StorageService.init_service( + project=os.path.abspath(config.ens_path), + ), ): yield gui diff --git a/tests/ert/ui_tests/gui/test_main_window.py b/tests/ert/ui_tests/gui/test_main_window.py index 709505ae50c..99d25ac53fe 100644 --- a/tests/ert/ui_tests/gui/test_main_window.py +++ b/tests/ert/ui_tests/gui/test_main_window.py @@ -96,7 +96,9 @@ def test_both_errors_and_warning_can_be_shown_in_suggestor( assert isinstance(gui, Suggestor) suggestions = gui.findChildren(SuggestorMessage) shown_messages = [elem.lbl.text() for elem in suggestions] - assert all(e in m for m, e in zip(shown_messages, expected_message_types)) + assert all( + e in m for m, e in zip(shown_messages, expected_message_types, strict=False) + ) @pytest.mark.usefixtures("copy_poly_case") @@ -140,9 +142,10 @@ def test_gui_shows_a_warning_and_disables_update_when_there_are_no_observations( def test_gui_shows_a_warning_and_disables_update_when_parameters_are_missing( qapp, tmp_path ): - with open("poly.ert", "r", encoding="utf-8") as fin, open( - "poly-no-gen-kw.ert", "w", encoding="utf-8" - ) as fout: + with ( + open("poly.ert", "r", encoding="utf-8") as fin, + open("poly-no-gen-kw.ert", "w", encoding="utf-8") as fout, + ): for line in fin: if "GEN_KW" not in line: fout.write(line) diff --git a/tests/ert/ui_tests/gui/test_plotting_of_snake_oil.py b/tests/ert/ui_tests/gui/test_plotting_of_snake_oil.py index 1af18cf5f3d..617acceb206 100644 --- a/tests/ert/ui_tests/gui/test_plotting_of_snake_oil.py +++ b/tests/ert/ui_tests/gui/test_plotting_of_snake_oil.py @@ -53,9 +53,12 @@ def plot_figure(qtbot, heat_equation_storage, snake_oil_case_storage, request): args_mock.config = "config.ert" log_handler = GUILogHandler() - with StorageService.init_service( - project=storage_config.ens_path, - ), open_storage(storage_config.ens_path) as storage: + with ( + StorageService.init_service( + 
project=storage_config.ens_path, + ), + open_storage(storage_config.ens_path) as storage, + ): gui = _setup_main_window(storage_config, args_mock, log_handler, storage) qtbot.addWidget(gui) @@ -129,9 +132,12 @@ def test_that_all_plotter_filter_boxes_yield_expected_filter_results( args_mock.config = "snake_oil.ert" log_handler = GUILogHandler() - with StorageService.init_service( - project=snake_oil_case_storage.ens_path, - ), open_storage(snake_oil_case_storage.ens_path) as storage: + with ( + StorageService.init_service( + project=snake_oil_case_storage.ens_path, + ), + open_storage(snake_oil_case_storage.ens_path) as storage, + ): gui = _setup_main_window( snake_oil_case_storage, args_mock, log_handler, storage ) diff --git a/tests/ert/ui_tests/gui/test_restart_no_responses_and_parameters.py b/tests/ert/ui_tests/gui/test_restart_no_responses_and_parameters.py index e2fb8c6172b..0cf03a2660d 100644 --- a/tests/ert/ui_tests/gui/test_restart_no_responses_and_parameters.py +++ b/tests/ert/ui_tests/gui/test_restart_no_responses_and_parameters.py @@ -81,12 +81,15 @@ def _open_main_window( @pytest.fixture def open_gui(tmp_path, monkeypatch, run_experiment, tmp_path_factory): monkeypatch.chdir(tmp_path) - with _open_main_window(tmp_path) as ( - gui, - _, - config, - ), StorageService.init_service( - project=os.path.abspath(config.ens_path), + with ( + _open_main_window(tmp_path) as ( + gui, + _, + config, + ), + StorageService.init_service( + project=os.path.abspath(config.ens_path), + ), ): yield gui diff --git a/tests/ert/ui_tests/gui/test_rft_export_plugin.py b/tests/ert/ui_tests/gui/test_rft_export_plugin.py index 83acb37b001..8540c8caa55 100644 --- a/tests/ert/ui_tests/gui/test_rft_export_plugin.py +++ b/tests/ert/ui_tests/gui/test_rft_export_plugin.py @@ -79,9 +79,12 @@ def test_rft_csv_export_plugin_exports_rft_data( output_file = Path("output.csv") ert_config = ErtConfig.from_file(args.config) - with StorageService.init_service( - project=os.path.abspath(ert_config.ens_path), - ), open_storage(ert_config.ens_path, mode="w") as storage: + with ( + StorageService.init_service( + project=os.path.abspath(ert_config.ens_path), + ), + open_storage(ert_config.ens_path, mode="w") as storage, + ): gui = _setup_main_window(ert_config, args, GUILogHandler(), storage) qtbot.addWidget(gui) diff --git a/tests/ert/ui_tests/gui/test_workflow_tool.py b/tests/ert/ui_tests/gui/test_workflow_tool.py index a59354a50d5..136647e2f90 100644 --- a/tests/ert/ui_tests/gui/test_workflow_tool.py +++ b/tests/ert/ui_tests/gui/test_workflow_tool.py @@ -53,12 +53,15 @@ def _open_main_window( @pytest.fixture def open_gui(tmp_path, monkeypatch): monkeypatch.chdir(tmp_path) - with _open_main_window(tmp_path) as ( - gui, - _, - config, - ), StorageService.init_service( - project=os.path.abspath(config.ens_path), + with ( + _open_main_window(tmp_path) as ( + gui, + _, + config, + ), + StorageService.init_service( + project=os.path.abspath(config.ens_path), + ), ): yield gui diff --git a/tests/ert/unit_tests/cli/test_model_hook_order.py b/tests/ert/unit_tests/cli/test_model_hook_order.py index dce91b31953..b1811908ab0 100644 --- a/tests/ert/unit_tests/cli/test_model_hook_order.py +++ b/tests/ert/unit_tests/cli/test_model_hook_order.py @@ -114,13 +114,16 @@ def test_hook_call_order_iterative_ensemble_smoother(monkeypatch): # Mock the return values of iterative_smoother_update # Mock the iteration property of IteratedEnsembleSmoother - with patch( - "ert.run_models.iterated_ensemble_smoother.iterative_smoother_update", - 
MagicMock(return_value=(MagicMock(), MagicMock())), - ), patch( - "ert.run_models.iterated_ensemble_smoother.IteratedEnsembleSmoother.sies_iteration", - new_callable=PropertyMock, - ) as mock_iteration: + with ( + patch( + "ert.run_models.iterated_ensemble_smoother.iterative_smoother_update", + MagicMock(return_value=(MagicMock(), MagicMock())), + ), + patch( + "ert.run_models.iterated_ensemble_smoother.IteratedEnsembleSmoother.sies_iteration", + new_callable=PropertyMock, + ) as mock_iteration, + ): mock_iteration.return_value = 2 test_class.run_experiment(MagicMock()) diff --git a/tests/ert/unit_tests/config/summary_generator.py b/tests/ert/unit_tests/config/summary_generator.py index be64a6e1505..3d0adaa44c0 100644 --- a/tests/ert/unit_tests/config/summary_generator.py +++ b/tests/ert/unit_tests/config/summary_generator.py @@ -486,7 +486,16 @@ def summaries( ) ) assume( - len(set(zip(smspec.keywords, smspec.region_numbers, smspec.well_names))) + len( + set( + zip( + smspec.keywords, + smspec.region_numbers, + smspec.well_names, + strict=False, + ) + ) + ) == len(smspec.keywords) ) dates = [0.0, *draw(time_deltas)] diff --git a/tests/ert/unit_tests/config/test_ensemble_config.py b/tests/ert/unit_tests/config/test_ensemble_config.py index c1d3e017d2b..b15fb004676 100644 --- a/tests/ert/unit_tests/config/test_ensemble_config.py +++ b/tests/ert/unit_tests/config/test_ensemble_config.py @@ -133,10 +133,13 @@ def test_ensemble_config_duplicate_node_names(): ] ], } - with pytest.raises( - ConfigValidationError, - match="GEN_KW and GEN_DATA contained duplicate name: Test_name", - ), pytest.warns(match="The template file .* is empty"): + with ( + pytest.raises( + ConfigValidationError, + match="GEN_KW and GEN_DATA contained duplicate name: Test_name", + ), + pytest.warns(match="The template file .* is empty"), + ): EnsembleConfig.from_dict(config_dict=config_dict) diff --git a/tests/ert/unit_tests/config/test_ert_config.py b/tests/ert/unit_tests/config/test_ert_config.py index 2f496f12df7..0e95a057738 100644 --- a/tests/ert/unit_tests/config/test_ert_config.py +++ b/tests/ert/unit_tests/config/test_ert_config.py @@ -377,11 +377,14 @@ def test_data_file_with_non_utf_8_character_gives_error_message(): with open(data_file, "ab") as f: f.write(b"\xff") data_file_path = str(Path.cwd() / data_file) - with pytest.raises( - ConfigValidationError, - match="Unsupported non UTF-8 character " - f"'ÿ' found in file: {data_file_path!r}", - ), pytest.warns(match="Failed to read NUM_CPU"): + with ( + pytest.raises( + ConfigValidationError, + match="Unsupported non UTF-8 character " + f"'ÿ' found in file: {data_file_path!r}", + ), + pytest.warns(match="Failed to read NUM_CPU"), + ): ErtConfig.from_file("config.ert") diff --git a/tests/ert/unit_tests/config/test_forward_model.py b/tests/ert/unit_tests/config/test_forward_model.py index d230e714fad..ebc935f10cf 100644 --- a/tests/ert/unit_tests/config/test_forward_model.py +++ b/tests/ert/unit_tests/config/test_forward_model.py @@ -124,9 +124,10 @@ def test_portable_exe_error_message(): mode = os.stat(name).st_mode mode |= stat.S_IXUSR | stat.S_IXGRP os.chmod(name, stat.S_IMODE(mode)) - with pytest.raises( - ConfigValidationError, match="EXECUTABLE must be set" - ), pytest.warns(ConfigWarning, match='"PORTABLE_EXE" key is deprecated'): + with ( + pytest.raises(ConfigValidationError, match="EXECUTABLE must be set"), + pytest.warns(ConfigWarning, match='"PORTABLE_EXE" key is deprecated'), + ): _ = _forward_model_step_from_config_file("CONFIG") @@ -1023,9 +1024,10 @@ 
def __init__(self): def validate_pre_experiment(self, fm_step_json: ForwardModelStepJSON) -> None: raise ForwardModelStepValidationError("I should not be a warning") - with pytest.raises( - ConfigValidationError, match="I should not be a warning" - ), pytest.warns(ConfigWarning, match="I should be a warning"): + with ( + pytest.raises(ConfigValidationError, match="I should not be a warning"), + pytest.warns(ConfigWarning, match="I should be a warning"), + ): _ = ErtConfig.with_plugins( forward_model_step_classes=[ FMWithFMStepValidationError, diff --git a/tests/ert/unit_tests/config/test_forward_model_data_to_json.py b/tests/ert/unit_tests/config/test_forward_model_data_to_json.py index dafdd1626b4..d53e1e0624f 100644 --- a/tests/ert/unit_tests/config/test_forward_model_data_to_json.py +++ b/tests/ert/unit_tests/config/test_forward_model_data_to_json.py @@ -150,7 +150,7 @@ def _generate_step( ] with open(config_file, "w", encoding="utf-8") as conf: - for key, val in zip(forward_model_keywords, values): + for key, val in zip(forward_model_keywords, values, strict=False): if key == "ENV" and val: for k, v in val.items(): conf.write(f"{key} {k} {v}\n") diff --git a/tests/ert/unit_tests/config/test_queue_config.py b/tests/ert/unit_tests/config/test_queue_config.py index 332df8ba363..73e0ad7d680 100644 --- a/tests/ert/unit_tests/config/test_queue_config.py +++ b/tests/ert/unit_tests/config/test_queue_config.py @@ -154,8 +154,9 @@ def test_invalid_realization_memory(invalid_memory_spec: str): def test_conflicting_realization_slurm_memory(): - with pytest.raises(ConfigValidationError), pytest.warns( - ConfigWarning, match="deprecated" + with ( + pytest.raises(ConfigValidationError), + pytest.warns(ConfigWarning, match="deprecated"), ): ErtConfig.from_file_contents( "NUM_REALIZATIONS 1\n" @@ -176,8 +177,9 @@ def test_conflicting_realization_slurm_memory_per_cpu(): def test_conflicting_realization_openpbs_memory_per_job(): - with pytest.raises(ConfigValidationError), pytest.warns( - ConfigWarning, match="deprecated" + with ( + pytest.raises(ConfigValidationError), + pytest.warns(ConfigWarning, match="deprecated"), ): ErtConfig.from_file_contents( "NUM_REALIZATIONS 1\n" @@ -201,8 +203,9 @@ def test_conflicting_realization_openpbs_memory_per_job_but_slurm_activated_only def test_that_invalid_memory_pr_job_raises_validation_error( torque_memory_with_unit_str, ): - with pytest.raises(ConfigValidationError), pytest.warns( - ConfigWarning, match="deprecated" + with ( + pytest.raises(ConfigValidationError), + pytest.warns(ConfigWarning, match="deprecated"), ): ErtConfig.from_file_contents( "NUM_REALIZATIONS 1\n" @@ -324,8 +327,9 @@ def test_that_valid_torque_queue_mem_options_are_ok(mem_per_job): ["5", "5g"], ) def test_that_torque_queue_mem_options_are_corrected(mem_per_job: str): - with pytest.raises(ConfigValidationError) as e, pytest.warns( - ConfigWarning, match="deprecated" + with ( + pytest.raises(ConfigValidationError) as e, + pytest.warns(ConfigWarning, match="deprecated"), ): ErtConfig.from_file_contents( "NUM_REALIZATIONS 1\n" diff --git a/tests/ert/unit_tests/config/test_workflow_jobs.py b/tests/ert/unit_tests/config/test_workflow_jobs.py index d431ffd30f0..9976f5aae35 100644 --- a/tests/ert/unit_tests/config/test_workflow_jobs.py +++ b/tests/ert/unit_tests/config/test_workflow_jobs.py @@ -28,7 +28,10 @@ def test_that_ert_warns_on_duplicate_workflow_jobs(tmp_path): with open(test_config_file_name, "w", encoding="utf-8") as fh: fh.write(test_config_contents) - with pytest.warns( - 
ConfigWarning, match="Duplicate workflow jobs with name 'CAREFUL_COPY_FILE'" - ), ErtPluginContext(): + with ( + pytest.warns( + ConfigWarning, match="Duplicate workflow jobs with name 'CAREFUL_COPY_FILE'" + ), + ErtPluginContext(), + ): _ = ErtConfig.from_file(test_config_file_name) diff --git a/tests/ert/unit_tests/data/test_integration_data.py b/tests/ert/unit_tests/data/test_integration_data.py index 9ef8e96424f..a95fb17b358 100644 --- a/tests/ert/unit_tests/data/test_integration_data.py +++ b/tests/ert/unit_tests/data/test_integration_data.py @@ -116,7 +116,7 @@ def create_summary_observation(): rng = np.random.default_rng() values = rng.uniform(0, 1.5, 200) errors = values * 0.1 - for restart, (value, error) in enumerate(zip(values, errors)): + for restart, (value, error) in enumerate(zip(values, errors, strict=False)): observations += f""" \nSUMMARY_OBSERVATION FOPR_{restart + 1} {{ @@ -155,7 +155,7 @@ def test_all_measured_snapshot(snapshot, snake_oil_storage, create_measured_data obs_keys = experiment.observation_keys measured_data = create_measured_data(obs_keys) snapshot.assert_match( - measured_data.data.round(10).to_csv(), "snake_oil_measured_output.csv" + measured_data.data.round(10).to_csv(), "snake_oil_measured_output.csv" ) diff --git a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_client.py b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_client.py index a374caa4cee..0e66cc99b46 100644 --- a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_client.py +++ b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_client.py @@ -13,8 +13,9 @@ def test_invalid_server(): host = "localhost" url = f"ws://{host}:{port}" - with Client(url, max_retries=2, timeout_multiplier=2) as c1, pytest.raises( - ClientConnectionError + with ( + Client(url, max_retries=2, timeout_multiplier=2) as c1, + pytest.raises(ClientConnectionError), ): c1.send("hei") diff --git a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_evaluator.py b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_evaluator.py index d88019aa988..fdae28e50a0 100644 --- a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_evaluator.py +++ b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_evaluator.py @@ -155,7 +155,7 @@ async def test_restarted_jobs_do_not_have_error_msgs(evaluator_to_use): async with Monitor(config_info) as monitor: # first snapshot before any event occurs events = monitor.track() - snapshot_event = await events.__anext__() + snapshot_event = await anext(events) snapshot = EnsembleSnapshot.from_nested_dict(snapshot_event.snapshot) assert snapshot.status == ENSEMBLE_STATE_UNKNOWN # two dispatch endpoint clients connect @@ -247,19 +247,22 @@ async def test_new_monitor_can_pick_up_where_we_left_off(evaluator_to_use): config_info = evaluator._config.get_connection_info() async with Monitor(config_info) as monitor: - async with Client( - url + "/dispatch", - cert=cert, - token=token, - max_retries=1, - timeout_multiplier=1, - ) as dispatch1, Client( - url + "/dispatch", - cert=cert, - token=token, - max_retries=1, - timeout_multiplier=1, - ) as dispatch2: + async with ( + Client( + url + "/dispatch", + cert=cert, + token=token, + max_retries=1, + timeout_multiplier=1, + ) as dispatch1, + Client( + url + "/dispatch", + cert=cert, + token=token, + max_retries=1, + timeout_multiplier=1, + ) as dispatch2, + ): # first dispatch endpoint client informs that forward model 0 is running event = ForwardModelStepRunning( ensemble=evaluator.ensemble.id_, @@ -377,24 +380,27 @@ async def 
test_dispatch_endpoint_clients_can_connect_and_monitor_can_shut_down_e url = evaluator._config.url # first snapshot before any event occurs - snapshot_event = await events.__anext__() + snapshot_event = await anext(events) assert type(snapshot_event) is EESnapshot snapshot = EnsembleSnapshot.from_nested_dict(snapshot_event.snapshot) assert snapshot.status == ENSEMBLE_STATE_UNKNOWN # two dispatch endpoint clients connect - async with Client( - url + "/dispatch", - cert=cert, - token=token, - max_retries=1, - timeout_multiplier=1, - ) as dispatch1, Client( - url + "/dispatch", - cert=cert, - token=token, - max_retries=1, - timeout_multiplier=1, - ) as dispatch2: + async with ( + Client( + url + "/dispatch", + cert=cert, + token=token, + max_retries=1, + timeout_multiplier=1, + ) as dispatch1, + Client( + url + "/dispatch", + cert=cert, + token=token, + max_retries=1, + timeout_multiplier=1, + ) as dispatch2, + ): # first dispatch endpoint client informs that real 0 fm 0 is running event = ForwardModelStepRunning( ensemble=evaluator.ensemble.id_, @@ -428,7 +434,7 @@ async def test_dispatch_endpoint_clients_can_connect_and_monitor_can_shut_down_e ) await dispatch2._send(event_to_json(event)) - event = await events.__anext__() + event = await anext(events) snapshot = EnsembleSnapshot.from_nested_dict(event.snapshot) assert ( snapshot.get_fm_step("1", "0")["status"] == FORWARD_MODEL_STATE_FINISHED @@ -442,7 +448,7 @@ async def test_dispatch_endpoint_clients_can_connect_and_monitor_can_shut_down_e # a second monitor connects async with Monitor(evaluator._config.get_connection_info()) as monitor2: events2 = monitor2.track() - full_snapshot_event = await events2.__anext__() + full_snapshot_event = await anext(events2) event = cast(EESnapshot, full_snapshot_event) snapshot = EnsembleSnapshot.from_nested_dict(event.snapshot) assert snapshot.status == ENSEMBLE_STATE_UNKNOWN @@ -460,8 +466,8 @@ async def test_dispatch_endpoint_clients_can_connect_and_monitor_can_shut_down_e await monitor.signal_cancel() # both monitors should get a terminated event - terminated = await events.__anext__() - terminated2 = await events2.__anext__() + terminated = await anext(events) + terminated2 = await anext(events2) assert type(terminated) is EETerminated assert type(terminated2) is EETerminated @@ -486,7 +492,7 @@ async def test_ensure_multi_level_events_in_order(evaluator_to_use): cert = evaluator._config.cert url = evaluator._config.url - snapshot_event = await events.__anext__() + snapshot_event = await anext(events) assert type(snapshot_event) is EESnapshot async with Client(url + "/dispatch", cert=cert, token=token) as dispatch: event = EnsembleStarted(ensemble=evaluator.ensemble.id_) diff --git a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_legacy.py b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_legacy.py index fbca5a2a265..5b845a6e3d8 100644 --- a/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_legacy.py +++ b/tests/ert/unit_tests/ensemble_evaluator/test_ensemble_legacy.py @@ -46,9 +46,10 @@ async def test_run_legacy_ensemble( use_token=False, generate_cert=False, ) - async with evaluator_to_use(ensemble, config) as evaluator, Monitor( - config - ) as monitor: + async with ( + evaluator_to_use(ensemble, config) as evaluator, + Monitor(config) as monitor, + ): async for event in monitor.track(): if type(event) in ( EESnapshotUpdate, @@ -84,9 +85,10 @@ async def test_run_and_cancel_legacy_ensemble( terminated_event = False - async with evaluator_to_use(ensemble, config) as evaluator, 
Monitor( - config - ) as monitor: + async with ( + evaluator_to_use(ensemble, config) as evaluator, + Monitor(config) as monitor, + ): # on lesser hardware the realizations might be killed by max_runtime # and the ensemble is set to STOPPED monitor._receiver_timeout = 10.0 diff --git a/tests/ert/unit_tests/forward_model_runner/test_event_reporter.py b/tests/ert/unit_tests/forward_model_runner/test_event_reporter.py index d7dad85f0e8..0575e78b954 100644 --- a/tests/ert/unit_tests/forward_model_runner/test_event_reporter.py +++ b/tests/ert/unit_tests/forward_model_runner/test_event_reporter.py @@ -184,9 +184,12 @@ def test_report_inconsistent_events(unused_tcp_port): reporter = Event(evaluator_url=url) lines = [] - with _mock_ws_thread(host, unused_tcp_port, lines), pytest.raises( - TransitionError, - match=r"Illegal transition None -> \(MessageType,\)", + with ( + _mock_ws_thread(host, unused_tcp_port, lines), + pytest.raises( + TransitionError, + match=r"Illegal transition None -> \(MessageType,\)", + ), ): reporter.report(Finish()) diff --git a/tests/ert/unit_tests/forward_model_runner/test_job_dispatch.py b/tests/ert/unit_tests/forward_model_runner/test_job_dispatch.py index 474ff102785..0befe45c5a9 100644 --- a/tests/ert/unit_tests/forward_model_runner/test_job_dispatch.py +++ b/tests/ert/unit_tests/forward_model_runner/test_job_dispatch.py @@ -345,11 +345,12 @@ def test_job_dispatch_kills_itself_after_unsuccessful_job(unused_tcp_port): port = unused_tcp_port jobs_json = json.dumps({"ens_id": "_id_", "dispatch_url": f"ws://localhost:{port}"}) - with patch("_ert.forward_model_runner.cli.os.killpg") as mock_killpg, patch( - "_ert.forward_model_runner.cli.os.getpgid" - ) as mock_getpgid, patch( - "_ert.forward_model_runner.cli.open", new=mock_open(read_data=jobs_json) - ), patch("_ert.forward_model_runner.cli.ForwardModelRunner") as mock_runner: + with ( + patch("_ert.forward_model_runner.cli.os.killpg") as mock_killpg, + patch("_ert.forward_model_runner.cli.os.getpgid") as mock_getpgid, + patch("_ert.forward_model_runner.cli.open", new=mock_open(read_data=jobs_json)), + patch("_ert.forward_model_runner.cli.ForwardModelRunner") as mock_runner, + ): mock_runner.return_value.run.return_value = [ Init([], 0, 0), Finish().with_error("overall bad run"), diff --git a/tests/ert/unit_tests/gui/simulation/test_run_dialog.py b/tests/ert/unit_tests/gui/simulation/test_run_dialog.py index 85173360bee..b1c34cae85a 100644 --- a/tests/ert/unit_tests/gui/simulation/test_run_dialog.py +++ b/tests/ert/unit_tests/gui/simulation/test_run_dialog.py @@ -558,12 +558,15 @@ def test_that_exception_in_base_run_model_is_handled(qtbot: QtBot, storage): args_mock.config = config_file ert_config = ErtConfig.from_file(config_file) - with StorageService.init_service( - project=os.path.abspath(ert_config.ens_path), - ), patch.object( - ert.run_models.SingleTestRun, - "run_experiment", - MagicMock(side_effect=ValueError("I failed :(")), + with ( + StorageService.init_service( + project=os.path.abspath(ert_config.ens_path), + ), + patch.object( + ert.run_models.SingleTestRun, + "run_experiment", + MagicMock(side_effect=ValueError("I failed :(")), + ), ): gui = _setup_main_window(ert_config, args_mock, GUILogHandler(), storage) qtbot.addWidget(gui) @@ -641,9 +644,12 @@ def test_that_stdout_and_stderr_buttons_react_to_file_content( args_mock = Mock() args_mock.config = "snake_oil.ert" - with StorageService.init_service( - project=os.path.abspath(snake_oil_case.ens_path), - ), open_storage(snake_oil_case.ens_path, mode="w") as 
storage: + with ( + StorageService.init_service( + project=os.path.abspath(snake_oil_case.ens_path), + ), + open_storage(snake_oil_case.ens_path, mode="w") as storage, + ): gui = _setup_main_window(snake_oil_case, args_mock, GUILogHandler(), storage) experiment_panel = gui.findChild(ExperimentPanel) diff --git a/tests/ert/unit_tests/gui/simulation/test_run_path_dialog.py b/tests/ert/unit_tests/gui/simulation/test_run_path_dialog.py index b0c52bb021c..ad4fde67f3c 100644 --- a/tests/ert/unit_tests/gui/simulation/test_run_path_dialog.py +++ b/tests/ert/unit_tests/gui/simulation/test_run_path_dialog.py @@ -57,9 +57,12 @@ def test_run_path_deleted_error( args_mock = Mock() args_mock.config = "snake_oil.ert" - with StorageService.init_service( - project=os.path.abspath(snake_oil_case.ens_path), - ), open_storage(snake_oil_case.ens_path, mode="w") as storage: + with ( + StorageService.init_service( + project=os.path.abspath(snake_oil_case.ens_path), + ), + open_storage(snake_oil_case.ens_path, mode="w") as storage, + ): gui = _setup_main_window(snake_oil_case, args_mock, GUILogHandler(), storage) experiment_panel = gui.findChild(ExperimentPanel) @@ -103,9 +106,12 @@ def test_run_path_is_deleted(snake_oil_case_storage: ErtConfig, qtbot: QtBot): args_mock = Mock() args_mock.config = "snake_oil.ert" - with StorageService.init_service( - project=os.path.abspath(snake_oil_case.ens_path), - ), open_storage(snake_oil_case.ens_path, mode="w") as storage: + with ( + StorageService.init_service( + project=os.path.abspath(snake_oil_case.ens_path), + ), + open_storage(snake_oil_case.ens_path, mode="w") as storage, + ): gui = _setup_main_window(snake_oil_case, args_mock, GUILogHandler(), storage) experiment_panel = gui.findChild(ExperimentPanel) @@ -147,9 +153,12 @@ def test_run_path_is_not_deleted(snake_oil_case_storage: ErtConfig, qtbot: QtBot args_mock = Mock() args_mock.config = "snake_oil.ert" - with StorageService.init_service( - project=os.path.abspath(snake_oil_case.ens_path), - ), open_storage(snake_oil_case.ens_path, mode="w") as storage: + with ( + StorageService.init_service( + project=os.path.abspath(snake_oil_case.ens_path), + ), + open_storage(snake_oil_case.ens_path, mode="w") as storage, + ): gui = _setup_main_window(snake_oil_case, args_mock, GUILogHandler(), storage) experiment_panel = gui.findChild(ExperimentPanel) diff --git a/tests/ert/unit_tests/plugins/test_misfit_preprocessor.py b/tests/ert/unit_tests/plugins/test_misfit_preprocessor.py index 3d472641ea8..84a6c58552d 100644 --- a/tests/ert/unit_tests/plugins/test_misfit_preprocessor.py +++ b/tests/ert/unit_tests/plugins/test_misfit_preprocessor.py @@ -11,10 +11,13 @@ def test_that_misfit_preprocessor_raises(): fh.writelines("HOOK_WORKFLOW config PRE_FIRST_UPDATE\n") with open("config", "w", encoding="utf-8") as fh: fh.writelines("MISFIT_PREPROCESSOR") - with pytest.raises( - ConfigValidationError, - match="MISFIT_PREPROCESSOR is removed, use ANALYSIS_SET_VAR OBSERVATIONS", - ), ErtPluginContext(): + with ( + pytest.raises( + ConfigValidationError, + match="MISFIT_PREPROCESSOR is removed, use ANALYSIS_SET_VAR OBSERVATIONS", + ), + ErtPluginContext(), + ): ErtConfig.from_file("poly.ert") @@ -25,8 +28,11 @@ def test_that_misfit_preprocessor_raises_with_config(): fh.writelines("HOOK_WORKFLOW config PRE_FIRST_UPDATE\n") with open("config", "w", encoding="utf-8") as fh: fh.writelines("MISFIT_PREPROCESSOR my_config") - with pytest.raises( - ConfigValidationError, - match="Add multiple entries to set up multiple groups", - ), 
ErtPluginContext(): + with ( + pytest.raises( + ConfigValidationError, + match="Add multiple entries to set up multiple groups", + ), + ErtPluginContext(), + ): ErtConfig.from_file("poly.ert") diff --git a/tests/ert/unit_tests/plugins/test_parameter_disable.py b/tests/ert/unit_tests/plugins/test_parameter_disable.py index aa967fd2e71..358f118efc5 100644 --- a/tests/ert/unit_tests/plugins/test_parameter_disable.py +++ b/tests/ert/unit_tests/plugins/test_parameter_disable.py @@ -15,7 +15,8 @@ def test_that_we_can_disable_a_parameter(): fh.writelines("MY_KEYWORD ") with open("prior.txt", "w", encoding="utf-8") as fh: fh.writelines("MY_KEYWORD NORMAL 0 1") - with pytest.raises( - ConfigValidationError, match="use the UPDATE:FALSE option" - ), ErtPluginContext(): + with ( + pytest.raises(ConfigValidationError, match="use the UPDATE:FALSE option"), + ErtPluginContext(), + ): ErtConfig.from_file("poly.ert") diff --git a/tests/ert/unit_tests/scheduler/test_scheduler.py b/tests/ert/unit_tests/scheduler/test_scheduler.py index d6d77a4c5c1..f76ce96c2a2 100644 --- a/tests/ert/unit_tests/scheduler/test_scheduler.py +++ b/tests/ert/unit_tests/scheduler/test_scheduler.py @@ -1,4 +1,5 @@ import asyncio +import itertools import json import random import shutil @@ -519,8 +520,7 @@ async def wait(): await sch.execute() deltas = [ - next_start - start - for start, next_start in zip(run_start_times[:-1], run_start_times[1:]) + next_start - start for start, next_start in itertools.pairwise(run_start_times) ] assert min(deltas) >= submit_sleep * 0.8 assert max(deltas) <= submit_sleep + 0.1 @@ -568,8 +568,7 @@ async def wait(): await sch.execute() deltas = [ - next_start - start - for start, next_start in zip(run_start_times[:-1], run_start_times[1:]) + next_start - start for start, next_start in itertools.pairwise(run_start_times) ] assert min(deltas) >= submit_sleep * 0.8 diff --git a/tests/ert/unit_tests/simulator/test_batch_sim.py b/tests/ert/unit_tests/simulator/test_batch_sim.py index dfbbb950163..a24b3be6d8f 100644 --- a/tests/ert/unit_tests/simulator/test_batch_sim.py +++ b/tests/ert/unit_tests/simulator/test_batch_sim.py @@ -236,7 +236,7 @@ def test_batch_simulation(batch_simulator, storage): results = ctx.results() assert len(results) == 2 - for result, (_, controls) in zip(results, case_data): + for result, (_, controls) in zip(results, case_data, strict=False): assert sorted(result.keys()) == sorted(["ORDER", "ON_OFF"]) for res_key, ctrl_key in ( @@ -393,7 +393,7 @@ def test_batch_simulation_suffixes(batch_sim_example, storage): assert sorted(result.keys()) == sorted(["ORDER", "ON_OFF"]) keys = ("W1", "W2", "W3") - for result, (_, controls) in zip(results, case_data): + for result, (_, controls) in zip(results, case_data, strict=False): expected = [controls["WELL_ON_OFF"][key] ** 2 for key in keys] # [:3] slicing can be removed when responses are not stored in netcdf leading @@ -403,7 +403,7 @@ def test_batch_simulation_suffixes(batch_sim_example, storage): expected = [ v**2 for key in keys for _, v in controls["WELL_ORDER"][key].items() ] - for exp, act in zip(expected, list(result["ORDER"])): + for exp, act in zip(expected, list(result["ORDER"]), strict=False): assert act == pytest.approx(exp) diff --git a/tests/ert/unit_tests/storage/test_local_storage.py b/tests/ert/unit_tests/storage/test_local_storage.py index 1d6236fa4c9..eecee71bf55 100644 --- a/tests/ert/unit_tests/storage/test_local_storage.py +++ b/tests/ert/unit_tests/storage/test_local_storage.py @@ -63,15 +63,21 @@ def 
test_create_experiment(tmp_path): def test_that_loading_non_existing_experiment_throws(tmp_path): - with open_storage(tmp_path, mode="w") as storage, pytest.raises( - KeyError, match="Experiment with name 'non-existing-experiment' not found" + with ( + open_storage(tmp_path, mode="w") as storage, + pytest.raises( + KeyError, match="Experiment with name 'non-existing-experiment' not found" + ), ): storage.get_experiment_by_name("non-existing-experiment") def test_that_loading_non_existing_ensemble_throws(tmp_path): - with open_storage(tmp_path, mode="w") as storage, pytest.raises( - KeyError, match="Ensemble with name 'non-existing-ensemble' not found" + with ( + open_storage(tmp_path, mode="w") as storage, + pytest.raises( + KeyError, match="Ensemble with name 'non-existing-ensemble' not found" + ), ): experiment = storage.create_experiment(name="test-experiment") experiment.get_ensemble_by_name("non-existing-ensemble") @@ -330,7 +336,7 @@ def test_get_unique_experiment_name(snake_oil_storage): "default", ] experiment_list = [MagicMock() for _ in range(len(names))] - for k, v in zip(experiment_list, names): + for k, v in zip(experiment_list, names, strict=False): k.name = v experiments.return_value = experiment_list @@ -512,10 +518,13 @@ def __exit__(self, *args, **kwargs): def test_write_transaction_failure(tmp_path): with open_storage(tmp_path, "w") as storage: path = tmp_path / "file.txt" - with patch( - "ert.storage.local_storage.NamedTemporaryFile", - RaisingWriteNamedTemporaryFile, - ) as f, pytest.raises(RuntimeError): + with ( + patch( + "ert.storage.local_storage.NamedTemporaryFile", + RaisingWriteNamedTemporaryFile, + ) as f, + pytest.raises(RuntimeError), + ): storage._write_transaction(path, b"deadbeaf") assert f.entered @@ -605,9 +614,10 @@ def create_field_list(self, fields): def double_open_timeout(self): # Opening with write access will timeout when # already opened with mode="w" somewhere else - with patch( - "ert.storage.local_storage.LocalStorage.LOCK_TIMEOUT", 0.0 - ), pytest.raises(ErtStorageException): + with ( + patch("ert.storage.local_storage.LocalStorage.LOCK_TIMEOUT", 0.0), + pytest.raises(ErtStorageException), + ): open_storage(self.tmpdir + "/storage/", mode="w") @rule() @@ -681,10 +691,13 @@ def write_error_in_save_field(self, model_ensemble: Ensemble, field_data): iens = 0 assume(not storage_ensemble.realizations_initialized([iens])) for f in fields: - with patch( - "ert.storage.local_storage.NamedTemporaryFile", - RaisingWriteNamedTemporaryFile, - ) as temp_file, pytest.raises(RuntimeError): + with ( + patch( + "ert.storage.local_storage.NamedTemporaryFile", + RaisingWriteNamedTemporaryFile, + ) as temp_file, + pytest.raises(RuntimeError), + ): storage_ensemble.save_parameters( f.name, iens, @@ -875,10 +888,13 @@ def write_error_in_set_failure( storage_ensemble = self.storage.get_ensemble(model_ensemble.uuid) - with patch( - "ert.storage.local_storage.NamedTemporaryFile", - RaisingWriteNamedTemporaryFile, - ) as f, pytest.raises(RuntimeError): + with ( + patch( + "ert.storage.local_storage.NamedTemporaryFile", + RaisingWriteNamedTemporaryFile, + ) as f, + pytest.raises(RuntimeError), + ): storage_ensemble.set_failure( realization, RealizationStorageState.PARENT_FAILURE, message ) diff --git a/tests/ert/unit_tests/test_run_path_creation.py b/tests/ert/unit_tests/test_run_path_creation.py index a54ee55fc7e..2d46aa8e21f 100644 --- a/tests/ert/unit_tests/test_run_path_creation.py +++ b/tests/ert/unit_tests/test_run_path_creation.py @@ -502,7 +502,9 @@ def 
test_write_snakeoil_runpath_file(snake_oil_case, storage, itr): exp_runpaths = list(map(os.path.realpath, exp_runpaths)) with open(runpath_list_path, "r", encoding="utf-8") as f: - dumped_runpaths = list(zip(*[line.split() for line in f.readlines()]))[1] + dumped_runpaths = list( + zip(*[line.split() for line in f.readlines()], strict=False) + )[1] assert list(exp_runpaths) == list(dumped_runpaths) diff --git a/tests/ert/unit_tests/test_tracking.py b/tests/ert/unit_tests/test_tracking.py index 37220ed1cef..cf031d8f1f6 100644 --- a/tests/ert/unit_tests/test_tracking.py +++ b/tests/ert/unit_tests/test_tracking.py @@ -299,7 +299,7 @@ def test_setting_env_context_during_run( thread.join() expected = ["_ERT_SIMULATION_MODE", "_ERT_EXPERIMENT_ID", "_ERT_ENSEMBLE_ID"] - for event, environment in zip(queue.events, queue.environment): + for event, environment in zip(queue.events, queue.environment, strict=False): if isinstance(event, (FullSnapshotEvent, SnapshotUpdateEvent)): for key in expected: assert key in environment diff --git a/tests/ert/unit_tests/workflow_runner/test_workflow_runner.py b/tests/ert/unit_tests/workflow_runner/test_workflow_runner.py index a4d486c46a3..d7b4d3a956c 100644 --- a/tests/ert/unit_tests/workflow_runner/test_workflow_runner.py +++ b/tests/ert/unit_tests/workflow_runner/test_workflow_runner.py @@ -92,9 +92,14 @@ def test_workflow_failed_job(): workflow_runner = WorkflowRunner(workflow) assert not workflow_runner.isRunning() - with patch.object( - WorkflowRunner, "run_blocking", side_effect=Exception("mocked workflow error") - ), workflow_runner: + with ( + patch.object( + WorkflowRunner, + "run_blocking", + side_effect=Exception("mocked workflow error"), + ), + workflow_runner, + ): workflow_runner.wait() assert workflow_runner.exception() is not None diff --git a/tests/everest/entry_points/test_config_branch_entry.py b/tests/everest/entry_points/test_config_branch_entry.py index c7af83df306..4207f73d247 100644 --- a/tests/everest/entry_points/test_config_branch_entry.py +++ b/tests/everest/entry_points/test_config_branch_entry.py @@ -84,9 +84,10 @@ def test_config_branch_preserves_config_section_order( opt_control_val_for_batch_id = {v for k, v in opt_controls.items()} diff_lines = [] - with open(CONFIG_FILE, "r", encoding="utf-8") as initial_config, open( - new_config_file_name, "r", encoding="utf-8" - ) as branch_config: + with ( + open(CONFIG_FILE, "r", encoding="utf-8") as initial_config, + open(new_config_file_name, "r", encoding="utf-8") as branch_config, + ): diff = difflib.unified_diff( initial_config.readlines(), branch_config.readlines(), diff --git a/tests/everest/functional/test_main_everest_entry.py b/tests/everest/functional/test_main_everest_entry.py index 279e63bc3b0..fec6fccd98a 100644 --- a/tests/everest/functional/test_main_everest_entry.py +++ b/tests/everest/functional/test_main_everest_entry.py @@ -32,8 +32,9 @@ def test_everest_entry_docs(): other tests. 
Here we just check that the entry point triggers the correct execution paths in the applcation """ - with capture_streams() as (out, err), pytest.raises( - SystemExit + with ( + capture_streams() as (out, err), + pytest.raises(SystemExit), ): # there is a call to sys.exit start_everest(["everest", "--docs"]) lines = [line.strip() for line in out.getvalue().split("\n")] diff --git a/tests/everest/test_api.py b/tests/everest/test_api.py index 4195ca67de3..ea9d36ee728 100644 --- a/tests/everest/test_api.py +++ b/tests/everest/test_api.py @@ -108,7 +108,7 @@ def _make_mock(mock_SebaSnapshot): gradient_info=grad_val, ) for bid, mf, obj_val, grad_val in zip( - _batch_ids, _merit_flags, _obj_values, _gradient_info + _batch_ids, _merit_flags, _obj_values, _gradient_info, strict=False ) ] simulation_data = [ @@ -133,6 +133,7 @@ def _make_mock(mock_SebaSnapshot): 8 * [_batch_ids[0]] + 8 * [_batch_ids[1]], _control_values, _function_values, + strict=False, ) ] @@ -217,7 +218,9 @@ def test_batches(api_no_gradient, api): def test_accepted_batches(api_no_gradient, api): - expected_result = [bid for bid, mf in zip(_batch_ids, _merit_flags) if mf] + expected_result = [ + bid for bid, mf in zip(_batch_ids, _merit_flags, strict=False) if mf + ] assert api_no_gradient.accepted_batches == expected_result assert api.accepted_batches == expected_result @@ -255,7 +258,9 @@ def test_realizations(api_no_gradient, api): def test_simulations(api_no_gradient, api): - expected_result = [s for s, g in zip(_simulations, _is_gradient) if not g] + expected_result = [ + s for s, g in zip(_simulations, _is_gradient, strict=False) if not g + ] assert api_no_gradient.simulations == expected_result assert api.simulations == _simulations @@ -270,14 +275,18 @@ def test_control_values(api_no_gradient, api): control_names = api_no_gradient.control_names expected_result = [ {"batch": bid, "control": name, "value": con[name]} - for bid, con in zip(8 * [_batch_ids[0]] + 8 * [_batch_ids[1]], _control_values) + for bid, con in zip( + 8 * [_batch_ids[0]] + 8 * [_batch_ids[1]], _control_values, strict=False + ) for name in control_names ] - for res, er in zip(api.control_values, expected_result): + for res, er in zip(api.control_values, expected_result, strict=False): assert res == er is_gradient = [ig for ig in _is_gradient for name in control_names] - expected_result = [tv for tv, ig in zip(expected_result, is_gradient) if not ig] - for res, er in zip(api_no_gradient.control_values, expected_result): + expected_result = [ + tv for tv, ig in zip(expected_result, is_gradient, strict=False) if not ig + ] + for res, er in zip(api_no_gradient.control_values, expected_result, strict=False): assert res == er @@ -298,14 +307,17 @@ def test_objective_values(api_no_gradient, api): 2 * _simulations, 8 * [_batch_ids[0]] + 8 * [_batch_ids[1]], _function_values, + strict=False, ) for name in function_names ] - for res, er in zip(api.objective_values, expected_result): + for res, er in zip(api.objective_values, expected_result, strict=False): assert res == er is_gradient = [ig for ig in _is_gradient for name in function_names] - expected_result = [tv for tv, ig in zip(expected_result, is_gradient) if not ig] - for res, er in zip(api_no_gradient.objective_values, expected_result): + expected_result = [ + tv for tv, ig in zip(expected_result, is_gradient, strict=False) if not ig + ] + for res, er in zip(api_no_gradient.objective_values, expected_result, strict=False): assert res == er @@ -317,6 +329,7 @@ def 
test_single_objective_values(api_no_gradient): _is_gradient, 8 * [_batch_ids[0]] + 8 * [_batch_ids[1]], _function_values, + strict=False, ) if not ig ] @@ -332,7 +345,7 @@ def test_single_objective_values(api_no_gradient): expected_result = [ {"accepted": m, "batch": b, "objective": v, **expected_objectives[b]} - for b, v, m in zip(_batch_ids, _obj_values, _merit_flags) + for b, v, m in zip(_batch_ids, _obj_values, _merit_flags, strict=False) ] result = api_no_gradient.single_objective_values @@ -342,7 +355,7 @@ def test_single_objective_values(api_no_gradient): def test_gradient_values(api_no_gradient): expected_result = [ {"batch": bid, "function": func, "control": ctrl, "value": val} - for bid, grad_info in zip(_batch_ids, _gradient_info) + for bid, grad_info in zip(_batch_ids, _gradient_info, strict=False) for func, info in grad_info.items() for ctrl, val in info.items() ] @@ -443,7 +456,9 @@ def test_get_summary_keys(_, api_no_gradient): assert summary_values.shape[0] == len(_realizations) * len(_batch_ids) * len(_dates) assert set(summary_values["batch"]) == set(_batch_ids) assert set(summary_values["realization"]) == set(_realizations) - non_gradient_simulations = [s for s, g in zip(_simulations, _is_gradient) if not g] + non_gradient_simulations = [ + s for s, g in zip(_simulations, _is_gradient, strict=False) if not g + ] assert set(summary_values["simulation"]) == set(non_gradient_simulations) assert set(summary_values["date"]) == set(_dates) # Check key values. @@ -492,7 +507,9 @@ def test_get_summary_keys_single_batch(_, api_no_gradient): assert summary_values.shape[0] == len(_realizations) * len(_dates) assert summary_values["batch"].iloc[0] == 2 assert set(summary_values["realization"]) == set(_realizations) - non_gradient_simulations = [s for s, g in zip(_simulations, _is_gradient) if not g] + non_gradient_simulations = [ + s for s, g in zip(_simulations, _is_gradient, strict=False) if not g + ] assert set(summary_values["simulation"]) == set(non_gradient_simulations) assert set(summary_values["date"]) == set(_dates) @@ -517,7 +534,9 @@ def test_get_summary_keys_single_key(_, api_no_gradient): assert summary_values.shape[0] == len(_realizations) * len(_batch_ids) * len(_dates) assert set(summary_values["batch"]) == set(_batch_ids) assert set(summary_values["realization"]) == set(_realizations) - non_gradient_simulations = [s for s, g in zip(_simulations, _is_gradient) if not g] + non_gradient_simulations = [ + s for s, g in zip(_simulations, _is_gradient, strict=False) if not g + ] assert set(summary_values["simulation"]) == set(non_gradient_simulations) assert set(summary_values["date"]) == set(_dates) # Check key values. 
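Note on the recurring zip() change above: since Python 3.10, zip() takes a strict flag (PEP 618), and ruff's B905 rule asks for it to be spelled out; strict=False keeps the historical truncate-to-shortest behaviour these tests rely on. The snippet below is a minimal sketch, not part of the patch, and the batch_ids / merit_flags values are made up for illustration.

# Sketch only: shows the difference between strict=False and strict=True.
batch_ids = [0, 2]
merit_flags = [True, False, True]  # hypothetical data, deliberately one element longer

# strict=False: silently stops at the shorter iterable (pre-3.10 behaviour).
accepted = [bid for bid, mf in zip(batch_ids, merit_flags, strict=False) if mf]
assert accepted == [0]

# strict=True: raises ValueError as soon as the length mismatch is detected.
try:
    list(zip(batch_ids, merit_flags, strict=True))
except ValueError as err:
    print(f"length mismatch detected: {err}")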
diff --git a/tests/everest/test_config_file_loader.py b/tests/everest/test_config_file_loader.py index d1711f89c00..01439396f37 100644 --- a/tests/everest/test_config_file_loader.py +++ b/tests/everest/test_config_file_loader.py @@ -76,7 +76,9 @@ def test_dependent_definitions(copy_mocked_test_data_to_tmp): with open(config_file, encoding="utf-8") as f: raw_config = YAML(typ="safe", pure=True).load(f) - conseq_chars = zip(string.ascii_lowercase[:-1], string.ascii_lowercase[1:]) + conseq_chars = zip( + string.ascii_lowercase[:-1], string.ascii_lowercase[1:], strict=False + ) for c, cdef in [*list(conseq_chars), (string.ascii_lowercase[-1], "configpath")]: raw_config[CK.DEFINITIONS][c] = "r{{{{ {} }}}}".format(cdef) diff --git a/tests/everest/test_data/mocked_test_case/jobs/well_order_mock.py b/tests/everest/test_data/mocked_test_case/jobs/well_order_mock.py index 09cd37c8d56..259fd7d8faf 100755 --- a/tests/everest/test_data/mocked_test_case/jobs/well_order_mock.py +++ b/tests/everest/test_data/mocked_test_case/jobs/well_order_mock.py @@ -6,9 +6,10 @@ def create_file(order, template_file, target_file): - with open(target_file, "w", encoding="utf-8") as writeH, open( - template_file, encoding="utf-8" - ) as readH: + with ( + open(target_file, "w", encoding="utf-8") as writeH, + open(template_file, encoding="utf-8") as readH, + ): for line in readH.readlines(): match_obj = re.search("(__[A-Z]+_[0-9]__)", line) if match_obj: diff --git a/tests/everest/test_everlint.py b/tests/everest/test_everlint.py index 9bbf8ac7297..448e5923ea5 100644 --- a/tests/everest/test_everlint.py +++ b/tests/everest/test_everlint.py @@ -160,7 +160,7 @@ def test_non_existent_file(): def test_invalid_integer(): invalid_values = [-1, -999, "apekatt"] exp_errors = 2 * ["(.*)greater than or equal to 0"] + ["(.*) not a valid integer"] - for invalid_value, err in zip(invalid_values, exp_errors): + for invalid_value, err in zip(invalid_values, exp_errors, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.MODEL][ConfigKeys.REALIZATIONS][1] = invalid_value @@ -203,7 +203,7 @@ def test_malformed_list(): "No such file or directory (.*)", ] - for invalid_val, exp_err in zip(invalid_values, exp_errs): + for invalid_val, exp_err in zip(invalid_values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.INSTALL_DATA][0][ConfigKeys.SOURCE] = invalid_val @@ -243,7 +243,7 @@ def test_bool_validation(): values = [True, False, 0, 1, "True", ["I'm", [True for real in []]]] exp_errs = 2 * [None] + 4 * ["(.*) could not be parsed to a boolean"] - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.INSTALL_DATA][0][ConfigKeys.LINK] = val @@ -286,7 +286,7 @@ def test_existing_path_validation(): + ["str type expected"] ) - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.INSTALL_DATA][0][ConfigKeys.SOURCE] = val @@ -317,7 +317,7 @@ def test_existing_file_validation(): + ["str type expected"] ) - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) jobs = config[ConfigKeys.INSTALL_JOBS] jobs[0][ConfigKeys.SOURCE] = val @@ -337,7 +337,7 @@ def 
test_existing_dir_validation(): exp_errs = [None, "no such file or directory (.*)"] - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.CONFIGPATH] = Path(val) errors = EverestConfig.lint_config_dict(config) @@ -367,7 +367,7 @@ def test_valid_path_validation(): + ["embedded null byte"] ) - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config[ConfigKeys.ENVIRONMENT][ConfigKeys.OUTPUT_DIR] = val @@ -388,7 +388,7 @@ def test_valid_filepath_validation(): exp_errs = ["Invalid type", None, None, "Invalid type"] - for val, exp_err in zip(values, exp_errs): + for val, exp_err in zip(values, exp_errs, strict=False): config = yaml_file_to_substituted_config_dict(SNAKE_OIL_CONFIG) config["export"] = {} config["export"]["csv_output_filepath"] = val diff --git a/tests/everest/test_math_func.py b/tests/everest/test_math_func.py index ac632532850..3b522b4dd7d 100644 --- a/tests/everest/test_math_func.py +++ b/tests/everest/test_math_func.py @@ -104,6 +104,7 @@ def test_math_func_multiobj( for a, b in zip( dt["obj_fn"], # pylint: disable=unsubscriptable-object ok_evals["sim_avg_obj"], + strict=False, ): # Opposite, because ropt negates values before passing to dakota assert -a == pytest.approx(b) diff --git a/tests/everest/test_res_initialization.py b/tests/everest/test_res_initialization.py index 3a7014ed216..6321b52b74e 100644 --- a/tests/everest/test_res_initialization.py +++ b/tests/everest/test_res_initialization.py @@ -413,7 +413,7 @@ def test_install_data_no_init(copy_test_data_to_tmp): targets = 2 * ["REEK.SMSPEC"] + 2 * ["tno_refcase"] links = [True, False, True, False] cmd_list = ["symlink", "copy_file", "symlink", "copy_directory"] - test_base = list(zip(sources, targets, links, cmd_list)) + test_base = list(zip(sources, targets, links, cmd_list, strict=False)) tutorial_config_path = os.path.join(TUTORIAL_CONFIG_DIR, "mocked_test_case.yml") for source, target, link, cmd in test_base[1:2]: ever_config = EverestConfig.load_file(tutorial_config_path) @@ -518,7 +518,7 @@ def test_install_data(copy_test_data_to_tmp): targets = 2 * ["REEK.SMSPEC"] + 2 * ["tno_refcase"] links = [True, False, True, False] cmds = ["symlink", "copy_file", "symlink", "copy_directory"] - test_base = zip(sources, targets, links, cmds) + test_base = zip(sources, targets, links, cmds, strict=False) tutorial_config_path = os.path.join(TUTORIAL_CONFIG_DIR, "mocked_test_case.yml") for source, target, link, cmd in test_base: ever_config = EverestConfig.load_file(tutorial_config_path)
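The with-statement conversions earlier in this patch all follow the same pattern: parenthesized context managers, available since Python 3.10, replace backslash continuations or a single long line. The sketch below is illustrative only and is not taken from the patch; the file names are hypothetical.

# Sketch only: parenthesized context managers, one manager per line with a trailing comma.
def copy_first_line(src: str, dst: str) -> None:
    with (
        open(src, encoding="utf-8") as fin,
        open(dst, "w", encoding="utf-8") as fout,
    ):
        # Copy just the first line from src to dst.
        fout.write(fin.readline())

This layout lets formatters such as black keep each context manager on its own line instead of producing the awkward "with a(...), b(...) as x:" wrapping seen in the pre-patch code.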