diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py index 3466493be7d..c294b11f89f 100644 --- a/src/ert/run_models/everest_run_model.py +++ b/src/ert/run_models/everest_run_model.py @@ -3,9 +3,11 @@ import copy import datetime import functools +import json import logging import os import queue +import random import re import shutil import threading @@ -37,6 +39,7 @@ from everest.config import EverestConfig from everest.optimizer.everest2ropt import everest2ropt from everest.simulator import Simulator +from everest.simulator.everest_to_ert import everest_to_ert_config from everest.strings import EVEREST, SIMULATOR_END, SIMULATOR_START, SIMULATOR_UPDATE from ..resources import all_shell_script_fm_steps @@ -298,6 +301,11 @@ def __init__( optimization_callback: OptimizerCallback, display_all_jobs: bool = True, ): + everest_config = self._add_defaults(everest_config) + + Path(everest_config.log_dir).mkdir(parents=True, exist_ok=True) + Path(everest_config.optimization_output_dir).mkdir(parents=True, exist_ok=True) + self.ropt_config = everest2ropt(everest_config) self.everest_config = everest_config self.support_restart = False @@ -333,6 +341,57 @@ def __init__( self.num_retries_per_iter = 0 # OK? + @staticmethod + def _add_defaults(config: EverestConfig) -> EverestConfig: + """This function exists as a temporary mechanism to default configurations that + needs to be global in the sense that they should carry over both to ropt and ERT. + When the proper mechanism for this is implemented this code + should die. + + """ + defaulted_config = config.copy() + assert defaulted_config.environment is not None + + random_seed = defaulted_config.environment.random_seed + if random_seed is None: + random_seed = random.randint(1, 2**30) + + defaulted_config.environment.random_seed = random_seed + + logging.getLogger(EVEREST).info("Using random seed: %d", random_seed) + logging.getLogger(EVEREST).info( + "To deterministically reproduce this experiment, " + "add the above random seed to your configuration file." 
+ ) + + return defaulted_config + + @classmethod + def create( + cls, + ever_config: EverestConfig, + simulation_callback: Optional[SimulationCallback] = None, + optimization_callback: Optional[OptimizerCallback] = None, + random_seed: Optional[int] = None, + ) -> EverestRunModel: + def default_simulation_callback( + simulation_status: SimulationStatus | None, event: str + ) -> str | None: + return None + + def default_optimization_callback() -> str | None: + return None + + ert_config = everest_to_ert_config(cls._add_defaults(ever_config)) + return cls( + random_seed=random_seed, + config=ert_config, + everest_config=ever_config, + simulation_callback=simulation_callback or default_simulation_callback, + optimization_callback=optimization_callback + or default_optimization_callback, + ) + def run_experiment( self, evaluator_server_config: EvaluatorServerConfig, restart: bool = False ) -> None: @@ -500,3 +559,17 @@ def name(cls) -> str: @classmethod def description(cls) -> str: return "Run batches " + + @property + def exit_code( + self, + ) -> Optional[Literal["max_batch_num_reached"] | OptimizerExitCode]: + return self._exit_code + + @property + def result(self) -> Optional[seba_sqlite.sqlite_storage.OptimalResult]: + return self._result + + def __repr__(self) -> str: + config_json = json.dumps(self.everest_config, sort_keys=True, indent=2) + return f"EverestRunModel(config={config_json})" diff --git a/src/everest/detached/jobs/everserver.py b/src/everest/detached/jobs/everserver.py index ce8bdc11d12..aee3bf8fdff 100755 --- a/src/everest/detached/jobs/everserver.py +++ b/src/everest/detached/jobs/everserver.py @@ -19,6 +19,9 @@ from flask import Flask, Response, jsonify, request from ropt.enums import OptimizerExitCode +from ert.config import QueueSystem +from ert.ensemble_evaluator import EvaluatorServerConfig +from ert.run_models.everest_run_model import EverestRunModel from everest import export_to_csv, validate_export from everest.config import EverestConfig from everest.detached import ServerStatus, get_opt_status, update_everserver_status @@ -30,7 +33,6 @@ SIM_PROGRESS_ENDPOINT, STOP_ENDPOINT, ) -from everest.suite import start_optimization from everest.util import configure_logger, makedirs_if_needed, version_info @@ -269,12 +271,22 @@ def main(): try: update_everserver_status(config, ServerStatus.running) - exit_code = start_optimization( + + run_model = EverestRunModel.create( config, simulation_callback=partial(_sim_monitor, shared_data=shared_data), optimization_callback=partial(_opt_monitor, shared_data=shared_data), ) - status, message = _get_optimization_status(exit_code, shared_data) + + evaluator_server_config = EvaluatorServerConfig( + custom_port_range=range(49152, 51819) + if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL + else None + ) + + run_model.run_experiment(evaluator_server_config) + + status, message = _get_optimization_status(run_model.exit_code, shared_data) if status != ServerStatus.completed: update_everserver_status(config, status, message) return diff --git a/src/everest/suite.py b/src/everest/suite.py index 5c25bafb2a0..9d48db4f9f8 100644 --- a/src/everest/suite.py +++ b/src/everest/suite.py @@ -1,131 +1 @@ from __future__ import annotations - -import json -import logging -import random - -from ert.config import QueueSystem -from ert.ensemble_evaluator import EvaluatorServerConfig -from ert.run_models.everest_run_model import EverestRunModel -from everest.config import EverestConfig -from everest.plugins.site_config_env import 
PluginSiteConfigEnv -from everest.simulator.everest_to_ert import everest_to_ert_config -from everest.strings import EVEREST -from everest.util import makedirs_if_needed - - -def start_optimization( - config, simulation_callback=None, optimization_callback=None, display_all_jobs=True -): - workflow = _EverestWorkflow( - config, simulation_callback, optimization_callback, display_all_jobs - ) - with PluginSiteConfigEnv(): - res = workflow.start_optimization() - return res - - -def _add_defaults(config: EverestConfig): - """This function exists as a temporary mechanism to default configurations that - needs to be global in the sense that they should carry over both to ropt and ERT. - When the proper mechanism for this is implemented this code - should die. - - """ - defaulted_config = config.copy() - assert defaulted_config.environment is not None - - random_seed = defaulted_config.environment.random_seed - if random_seed is None: - random_seed = random.randint(1, 2**30) - - defaulted_config.environment.random_seed = random_seed - - logging.getLogger(EVEREST).info("Using random seed: %d", random_seed) - logging.getLogger(EVEREST).info( - "To deterministically reproduce this experiment, " - "add the above random seed to your configuration file." - ) - - return defaulted_config - - -class _EverestWorkflow(object): - """ - An instance of this class is the main object in everest. - - Through this object an optimization experiment is instantiated and executed/run. - This object will provide access to the entire optimization configuration. - """ - - def __init__( - self, - config: EverestConfig, - simulation_callback=None, - optimization_callback=None, - display_all_jobs=True, - ): - """Will initialize an Everest instance either from a configuration file or - a loaded config. - - @config a dictionary containing the configuration. See everest --doc - for documentation on the config - - @callback a function that will be called whenever changes in the - simulation or optimization routine occur, e.g., when one - realization's simulation completes, the status vector will be - sent, with the event SIMULATOR_UPDATE. - """ - - # Callbacks - self._sim_callback = simulation_callback - self._opt_callback = optimization_callback - - self._config = _add_defaults(config) - - makedirs_if_needed(self.config.log_dir) - makedirs_if_needed(self.config.optimization_output_dir) - - def start_optimization(self): - """Run an optimization with the current settings. - - This method must be called from the same thread where this - object has been created (probably because of the use of sqlite3 - deeper down). - This method is not thread safe. Multiple overlapping executions - of this method will probably lead to a crash - """ - ert_config = everest_to_ert_config(self.config) - run_model = EverestRunModel( - random_seed=ert_config.random_seed, - config=ert_config, - everest_config=self.config, - simulation_callback=self._sim_callback, - optimization_callback=self._opt_callback, - ) - - evaluator_server_config = EvaluatorServerConfig( - custom_port_range=range(49152, 51819) - if ert_config.queue_config.queue_system == QueueSystem.LOCAL - else None - ) - - run_model.run_experiment(evaluator_server_config) - - # Extract the best result from the storage. 
- self._result = run_model._result - - return run_model._exit_code - - @property - def result(self): - return self._result - - @property - def config(self) -> EverestConfig: - return self._config - - def __repr__(self): - return "EverestWorkflow(config=%s)" % json.dumps( - self.config, sort_keys=True, indent=2 - ) diff --git a/tests/everest/conftest.py b/tests/everest/conftest.py index 04b6d888e9f..b65870f8025 100644 --- a/tests/everest/conftest.py +++ b/tests/everest/conftest.py @@ -6,6 +6,8 @@ import pytest +from ert.config import QueueSystem +from ert.ensemble_evaluator import EvaluatorServerConfig from everest.config.control_config import ControlConfig from tests.everest.utils import relpath @@ -123,3 +125,15 @@ def copy_egg_test_data_to_tmp(tmp_path, monkeypatch): @pytest.fixture def change_to_tmpdir(tmp_path, monkeypatch): monkeypatch.chdir(tmp_path) + + +@pytest.fixture +def evaluator_server_config_generator(): + def create_evaluator_server_config(run_model): + return EvaluatorServerConfig( + custom_port_range=range(49152, 51819) + if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL + else None + ) + + return create_evaluator_server_config diff --git a/tests/everest/test_cvar.py b/tests/everest/test_cvar.py index f439fb68521..e030532421c 100644 --- a/tests/everest/test_cvar.py +++ b/tests/everest/test_cvar.py @@ -1,25 +1,27 @@ import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow CONFIG_FILE_CVAR = "config_cvar.yml" -def test_mathfunc_cvar(copy_math_func_test_data_to_tmp): +def test_mathfunc_cvar( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_CVAR) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check resulting points - x0, x1, x2 = (workflow.result.controls["point_" + p] for p in ["x", "y", "z"]) + x0, x1, x2 = (run_model.result.controls["point_" + p] for p in ["x", "y", "z"]) assert x0 == pytest.approx(0.5, 0.05) assert x1 == pytest.approx(0.5, 0.05) assert x2 == pytest.approx(0.5, 0.05) - total_objective = workflow.result.total_objective + total_objective = run_model.result.total_objective assert total_objective <= 0.001 assert total_objective >= -0.001 diff --git a/tests/everest/test_discrete.py b/tests/everest/test_discrete.py index ca92aa7d10e..102e8ad5eab 100644 --- a/tests/everest/test_discrete.py +++ b/tests/everest/test_discrete.py @@ -1,15 +1,17 @@ +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow CONFIG_DISCRETE = "config_discrete.yml" -def test_discrete_optimizer(copy_math_func_test_data_to_tmp): +def test_discrete_optimizer( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_DISCRETE) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) - assert workflow.result.controls["point_x"] == 3 - assert workflow.result.controls["point_y"] == 7 + assert run_model.result.controls["point_x"] == 3 + 
assert run_model.result.controls["point_y"] == 7 diff --git a/tests/everest/test_environment.py b/tests/everest/test_environment.py index 0d3def5a253..998b64b0384 100644 --- a/tests/everest/test_environment.py +++ b/tests/everest/test_environment.py @@ -1,6 +1,6 @@ import pytest -import everest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig from everest.simulator.everest_to_ert import _everest_to_ert_config_dict @@ -13,9 +13,8 @@ def test_seed(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) config.environment.random_seed = random_seed - ever_workflow = everest.suite._EverestWorkflow(config) - - assert random_seed == ever_workflow.config.environment.random_seed + run_model = EverestRunModel.create(config) + assert random_seed == run_model.everest_config.environment.random_seed # Res ert_config = _everest_to_ert_config_dict(config) @@ -26,6 +25,6 @@ def test_seed(copy_math_func_test_data_to_tmp): def test_loglevel(copy_math_func_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) config.environment.log_level = "info" - ever_workflow = everest.suite._EverestWorkflow(config) - config = ever_workflow.config + run_model = EverestRunModel.create(config) + config = run_model.everest_config assert len(EverestConfig.lint_config_dict(config.to_dict())) == 0 diff --git a/tests/everest/test_everest_initialization.py b/tests/everest/test_everest_initialization.py index e6e8e060deb..b46967637f1 100644 --- a/tests/everest/test_everest_initialization.py +++ b/tests/everest/test_everest_initialization.py @@ -2,8 +2,8 @@ import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow NO_PROJECT_RES = ( os.environ.get("NO_PROJECT_RES", False), @@ -14,18 +14,19 @@ @pytest.mark.skipif(NO_PROJECT_RES[0], reason=NO_PROJECT_RES[1]) def test_init_no_project_res(copy_egg_test_data_to_tmp): config_file = os.path.join("everest", "model", "config.yml") - config_dict = EverestConfig.load_file(config_file) - _EverestWorkflow(config_dict) + config = EverestConfig.load_file(config_file) + EverestRunModel.create(config) def test_init(copy_mocked_test_data_to_tmp): config_file = os.path.join("mocked_test_case.yml") - config_dict = EverestConfig.load_file(config_file) - _EverestWorkflow(config_dict) + config = EverestConfig.load_file(config_file) + EverestRunModel.create(config) def test_no_config_init(): with pytest.raises(AttributeError): - _EverestWorkflow(None) # type: ignore + EverestRunModel.create(None) + with pytest.raises(AttributeError): - _EverestWorkflow("Frozen bananas") # type: ignore + EverestRunModel.create("Frozen bananas") diff --git a/tests/everest/test_everest_output.py b/tests/everest/test_everest_output.py index 37cb57314c9..6719795b07c 100644 --- a/tests/everest/test_everest_output.py +++ b/tests/everest/test_everest_output.py @@ -6,6 +6,7 @@ import pytest from ert.config import ErtConfig +from ert.run_models.everest_run_model import EverestRunModel from ert.storage import open_storage from everest.config import EverestConfig from everest.detached import generate_everserver_ert_config, start_server @@ -15,18 +16,17 @@ DETACHED_NODE_DIR, OPTIMIZATION_OUTPUT_DIR, ) -from everest.suite import _EverestWorkflow from everest.util import makedirs_if_needed def test_that_one_experiment_creates_one_ensemble_per_batch( - copy_math_func_test_data_to_tmp, + copy_math_func_test_data_to_tmp, 
evaluator_server_config_generator ): config = EverestConfig.load_file("config_minimal.yml") - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) batches = os.listdir(config.simulation_dir) ert_config = ErtConfig.with_plugins().from_dict(_everest_to_ert_config_dict(config)) @@ -58,7 +58,8 @@ def test_everest_output(start_mock, copy_mocked_test_data_to_tmp): def useless_cb(*args, **kwargs): pass - _EverestWorkflow(config, optimization_callback=useless_cb) + EverestRunModel.create(config, optimization_callback=useless_cb) + # Check the output folder is created when stating the optimization # in everest workflow assert DEFAULT_OUTPUT_DIR not in initial_folders diff --git a/tests/everest/test_everserver.py b/tests/everest/test_everserver.py index 255a09ea172..08e1328ca97 100644 --- a/tests/everest/test_everserver.py +++ b/tests/everest/test_everserver.py @@ -23,15 +23,14 @@ def check_status(*args, **kwargs): assert status["status"] == kwargs["status"] -def fail_optimization( - config, simulation_callback, optimization_callback, from_ropt=False -): +def fail_optimization(self, from_ropt=False): # Patch start_optimization to raise a failed optimization callback. Also # call the provided simulation callback, which has access to the shared_data # variable in the eversever main function. Patch that callback to modify # shared_data (see set_shared_status() below). - simulation_callback(None, None) + self._sim_callback(None, None) if from_ropt: + self._exit_code = OptimizerExitCode.TOO_FEW_REALIZATIONS return OptimizerExitCode.TOO_FEW_REALIZATIONS raise Exception("Failed optimization") @@ -109,8 +108,11 @@ def test_everserver_status_failure(_1, copy_math_func_test_data_to_tmp): ) @patch("everest.detached.jobs.everserver._everserver_thread") @patch( - "everest.detached.jobs.everserver.start_optimization", - side_effect=partial(check_status, status=ServerStatus.running), + "ert.run_models.everest_run_model.EverestRunModel.run_experiment", + autospec=True, + side_effect=lambda self, evaluator_server_config, restart=False: check_status( + self.everest_config, status=ServerStatus.running + ), ) @patch("everest.detached.jobs.everserver.validate_export", return_value=([], False)) @patch( @@ -143,8 +145,11 @@ def test_everserver_status_running_complete( @patch("everest.detached.jobs.everserver._write_hostfile") @patch("everest.detached.jobs.everserver._everserver_thread") @patch( - "everest.detached.jobs.everserver.start_optimization", - side_effect=partial(fail_optimization, from_ropt=True), + "ert.run_models.everest_run_model.EverestRunModel.run_experiment", + autospec=True, + side_effect=lambda self, evaluator_server_config, restart=False: fail_optimization( + self, from_ropt=True + ), ) @patch( "everest.detached.jobs.everserver._sim_monitor", @@ -190,8 +195,11 @@ def test_everserver_status_failed_job( @patch("everest.detached.jobs.everserver._write_hostfile") @patch("everest.detached.jobs.everserver._everserver_thread") @patch( - "everest.detached.jobs.everserver.start_optimization", - side_effect=fail_optimization, + "ert.run_models.everest_run_model.EverestRunModel.run_experiment", + autospec=True, + side_effect=lambda self, evaluator_server_config, restart=False: fail_optimization( + self, from_ropt=False + ), ) @patch( "everest.detached.jobs.everserver._sim_monitor", diff --git 
a/tests/everest/test_fix_control.py b/tests/everest/test_fix_control.py index 3c04a2698c7..9327ff938e7 100644 --- a/tests/everest/test_fix_control.py +++ b/tests/everest/test_fix_control.py @@ -1,16 +1,18 @@ +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml" -def test_fix_control(copy_math_func_test_data_to_tmp): +def test_fix_control( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].variables[0].enabled = False - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check that the first variable remains fixed: - assert workflow.result.controls["point_x-0"] == config.controls[0].initial_guess + assert run_model.result.controls["point_x-0"] == config.controls[0].initial_guess diff --git a/tests/everest/test_math_func.py b/tests/everest/test_math_func.py index 871de2c0848..418cc23217e 100644 --- a/tests/everest/test_math_func.py +++ b/tests/everest/test_math_func.py @@ -5,11 +5,11 @@ import pandas as pd import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest import ConfigKeys as CK from everest.config import EverestConfig from everest.config.export_config import ExportConfig from everest.export import export -from everest.suite import _EverestWorkflow from everest.util import makedirs_if_needed CONFIG_FILE_MULTIOBJ = "config_multiobj.yml" @@ -19,27 +19,29 @@ @pytest.mark.integration_test -def test_math_func_multiobj(copy_math_func_test_data_to_tmp): +def test_math_func_multiobj( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_MULTIOBJ) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check resulting points - x, y, z = (workflow.result.controls["point_" + p] for p in ("x", "y", "z")) + x, y, z = (run_model.result.controls["point_" + p] for p in ("x", "y", "z")) assert x == pytest.approx(0.0, abs=0.05) assert y == pytest.approx(0.0, abs=0.05) assert z == pytest.approx(0.5, abs=0.05) # Check the optimum values for each object. 
- optim_p = workflow.result.expected_objectives["distance_p"] - optim_q = workflow.result.expected_objectives["distance_q"] + optim_p = run_model.result.expected_objectives["distance_p"] + optim_q = run_model.result.expected_objectives["distance_q"] assert optim_p == pytest.approx(-0.5, abs=0.05) assert optim_q == pytest.approx(-4.5, abs=0.05) # The overall optimum is a weighted average of the objectives - assert workflow.result.total_objective == pytest.approx( + assert run_model.result.total_objective == pytest.approx( (-0.5 * (2.0 / 3.0) * 1.5) + (-4.5 * (1.0 / 3.0) * 1.0), abs=0.01 ) @@ -69,7 +71,7 @@ def test_math_func_multiobj(copy_math_func_test_data_to_tmp): assert best["point_z"] == pytest.approx(z) assert best["distance_p"] == pytest.approx(optim_p) assert best["distance_q"] == pytest.approx(optim_q) - assert best["sim_avg_obj"] == pytest.approx(workflow.result.total_objective) + assert best["sim_avg_obj"] == pytest.approx(run_model.result.total_objective) test_space = itertools.product( (first, best), @@ -104,22 +106,24 @@ def test_math_func_multiobj(copy_math_func_test_data_to_tmp): @pytest.mark.integration_test -def test_math_func_advanced(copy_math_func_test_data_to_tmp): +def test_math_func_advanced( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) point_names = ["x-0", "x-1", "x-2"] # Check resulting points - x0, x1, x2 = (workflow.result.controls["point_" + p] for p in point_names) + x0, x1, x2 = (run_model.result.controls["point_" + p] for p in point_names) assert x0 == pytest.approx(0.1, abs=0.05) assert x1 == pytest.approx(0.0, abs=0.05) assert x2 == pytest.approx(0.4, abs=0.05) # Check optimum value - assert pytest.approx(workflow.result.total_objective, abs=0.1) == -( + assert pytest.approx(run_model.result.total_objective, abs=0.1) == -( 0.25 * (1.6**2 + 1.5**2 + 0.1**2) + 0.75 * (0.4**2 + 0.5**2 + 0.1**2) ) # Expected distance is the weighted average of the (squared) distances @@ -129,7 +133,7 @@ def test_math_func_advanced(copy_math_func_test_data_to_tmp): dist_0 = (x0 + 1.5) ** 2 + (x1 + 1.5) ** 2 + (x2 - 0.5) ** 2 dist_1 = (x0 - 0.5) ** 2 + (x1 - 0.5) ** 2 + (x2 - 0.5) ** 2 expected_opt = -(w[0] * (dist_0) + w[1] * (dist_1)) - assert expected_opt == pytest.approx(workflow.result.total_objective, abs=0.001) + assert expected_opt == pytest.approx(run_model.result.total_objective, abs=0.001) # Test conversion to pandas DataFrame df = export(config) @@ -142,7 +146,7 @@ def test_math_func_advanced(copy_math_func_test_data_to_tmp): assert best_0["point_{}".format(point_names[2])] == pytest.approx(x2) assert best_0["distance"] == pytest.approx(-dist_0, abs=0.001) assert best_0["real_avg_obj"] == pytest.approx( - workflow.result.total_objective, abs=0.001 + run_model.result.total_objective, abs=0.001 ) assert best_0["realization_weight"] == 0.25 @@ -153,7 +157,7 @@ def test_math_func_advanced(copy_math_func_test_data_to_tmp): assert best_1["point_{}".format(point_names[2])] == pytest.approx(x2) assert best_1["distance"] == pytest.approx(-dist_1, abs=0.001) assert best_1["real_avg_obj"] == pytest.approx( - workflow.result.total_objective, abs=0.001 + run_model.result.total_objective, abs=0.001 ) assert 
best_1["realization_weight"] == 0.75 @@ -174,7 +178,9 @@ def test_math_func_advanced(copy_math_func_test_data_to_tmp): @pytest.mark.integration_test -def test_remove_run_path(copy_math_func_test_data_to_tmp): +def test_remove_run_path( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_REMOVE_RUN_PATH) simulation_should_fail = "simulation_2" @@ -185,7 +191,9 @@ def test_remove_run_path(copy_math_func_test_data_to_tmp): simulation_dir = config.simulation_dir - _EverestWorkflow(config).start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check the failed simulation folder still exists assert os.path.exists( @@ -205,7 +213,9 @@ def test_remove_run_path(copy_math_func_test_data_to_tmp): makedirs_if_needed(config.output_dir, roll_if_exists=True) config.simulator = None - _EverestWorkflow(config).start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check the all simulation folder exist when delete_run_path is set to False assert os.path.exists( @@ -221,22 +231,24 @@ def test_remove_run_path(copy_math_func_test_data_to_tmp): ), "Simulation folder should be there, something went wrong and was removed" -def test_math_func_auto_scaled_controls(copy_math_func_test_data_to_tmp): +def test_math_func_auto_scaled_controls( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_AUTO_SCALED_CONTROLS) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check resulting points - x, y, z = (workflow.result.controls["point_" + p] for p in ("x", "y", "z")) + x, y, z = (run_model.result.controls["point_" + p] for p in ("x", "y", "z")) assert x == pytest.approx(0.25, abs=0.05) assert y == pytest.approx(0.25, abs=0.05) assert z == pytest.approx(0.5, abs=0.05) # Check optimum value - optim = -workflow.result.total_objective # distance is provided as -distance + optim = -run_model.result.total_objective # distance is provided as -distance expected_dist = 0.25**2 + 0.25**2 assert expected_dist == pytest.approx(optim, abs=0.05) assert expected_dist == pytest.approx(optim, abs=0.05) diff --git a/tests/everest/test_multiobjective.py b/tests/everest/test_multiobjective.py index 67b4ac99c1a..72b0f729d6f 100644 --- a/tests/everest/test_multiobjective.py +++ b/tests/everest/test_multiobjective.py @@ -2,10 +2,10 @@ from ropt.config.enopt import EnOptConfig from ert.config import ErtConfig +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig from everest.optimizer.everest2ropt import everest2ropt from everest.simulator.everest_to_ert import _everest_to_ert_config_dict -from everest.suite import _EverestWorkflow from tests.everest.test_config_validation import has_error CONFIG_FILE = "config_multi_objectives.yml" @@ -65,7 +65,7 @@ def test_config_multi_objectives(copy_mocked_test_data_to_tmp): assert len(EverestConfig.lint_config_dict(config_dict)) == 0 # test everest initialization - _EverestWorkflow(config) + EverestRunModel.create(config) def 
test_multi_objectives2res(copy_mocked_test_data_to_tmp): @@ -98,12 +98,15 @@ def test_multi_objectives2ropt(copy_mocked_test_data_to_tmp): @pytest.mark.integration_test -def test_multi_objectives_run(copy_mocked_test_data_to_tmp): +def test_multi_objectives_run( + copy_mocked_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE) - workflow = _EverestWorkflow(config) - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Loop through objective functions in config and ensure they are in the # result object for obj in config.objective_functions: - assert obj.name in workflow.result.expected_objectives + assert obj.name in run_model.result.expected_objectives diff --git a/tests/everest/test_objective_type.py b/tests/everest/test_objective_type.py index 26f9a3b750a..2b424e58856 100644 --- a/tests/everest/test_objective_type.py +++ b/tests/everest/test_objective_type.py @@ -1,22 +1,24 @@ import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow CONFIG_FILE_STDDEV = "config_stddev.yml" -def test_mathfunc_stddev(copy_math_func_test_data_to_tmp): +def test_mathfunc_stddev( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_STDDEV) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # Check resulting points - x0, x1, x2 = (workflow.result.controls["point_" + p] for p in ["x", "y", "z"]) + x0, x1, x2 = (run_model.result.controls["point_" + p] for p in ["x", "y", "z"]) assert x0 == pytest.approx(0.5, abs=0.025) assert x1 == pytest.approx(0.5, abs=0.025) assert x2 == pytest.approx(0.5, abs=0.025) - assert workflow.result.total_objective < 0.0 + assert run_model.result.total_objective < 0.0 diff --git a/tests/everest/test_output_constraints.py b/tests/everest/test_output_constraints.py index 49ef017bd5d..e2470e05b1e 100644 --- a/tests/everest/test_output_constraints.py +++ b/tests/everest/test_output_constraints.py @@ -4,10 +4,10 @@ from ropt.config.enopt import EnOptConfig from ropt.enums import ConstraintType +from ert.run_models.everest_run_model import EverestRunModel from everest import ConfigKeys from everest.config import EverestConfig from everest.optimizer.everest2ropt import everest2ropt -from everest.suite import _EverestWorkflow from .test_config_validation import has_error @@ -227,13 +227,14 @@ def test_upper_bound_output_constraint_def(copy_mocked_test_data_to_tmp): assert expected["rhs_value"] == ropt_conf.nonlinear_constraints.rhs_values[0] assert expected["type"] == ropt_conf.nonlinear_constraints.types[0] - workflow = _EverestWorkflow(config) - assert workflow is not None + EverestRunModel.create(config) @pytest.mark.integration_test -def test_sim_output_constraints(copy_mocked_test_data_to_tmp): +def test_sim_output_constraints( + copy_mocked_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config 
= evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) diff --git a/tests/everest/test_samplers.py b/tests/everest/test_samplers.py index 70488ad2395..4ea0fdc053a 100644 --- a/tests/everest/test_samplers.py +++ b/tests/everest/test_samplers.py @@ -1,29 +1,31 @@ import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig from everest.config.sampler_config import SamplerConfig -from everest.suite import _EverestWorkflow CONFIG_FILE_ADVANCED = "config_advanced_scipy.yml" -def test_sampler_uniform(copy_math_func_test_data_to_tmp): +def test_sampler_uniform( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].sampler = SamplerConfig(**{"method": "uniform"}) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) point_names = ["x-0", "x-1", "x-2"] # Check resulting points - x0, x1, x2 = (workflow.result.controls["point_" + p] for p in point_names) + x0, x1, x2 = (run_model.result.controls["point_" + p] for p in point_names) assert x0 == pytest.approx(0.1, abs=0.025) assert x1 == pytest.approx(0.0, abs=0.025) assert x2 == pytest.approx(0.4, abs=0.025) # Check optimum value - assert pytest.approx(workflow.result.total_objective, abs=0.01) == -( + assert pytest.approx(run_model.result.total_objective, abs=0.01) == -( 0.25 * (1.6**2 + 1.5**2 + 0.1**2) + 0.75 * (0.4**2 + 0.5**2 + 0.1**2) ) # Expected distance is the weighted average of the (squared) distances @@ -33,28 +35,30 @@ def test_sampler_uniform(copy_math_func_test_data_to_tmp): dist_0 = (x0 + 1.5) ** 2 + (x1 + 1.5) ** 2 + (x2 - 0.5) ** 2 dist_1 = (x0 - 0.5) ** 2 + (x1 - 0.5) ** 2 + (x2 - 0.5) ** 2 expected_opt = -(w[0] * (dist_0) + w[1] * (dist_1)) - assert expected_opt == pytest.approx(workflow.result.total_objective, abs=0.001) + assert expected_opt == pytest.approx(run_model.result.total_objective, abs=0.001) -def test_sampler_mixed(copy_math_func_test_data_to_tmp): +def test_sampler_mixed( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(CONFIG_FILE_ADVANCED) config.controls[0].variables[0].sampler = SamplerConfig(**{"method": "uniform"}) config.controls[0].variables[1].sampler = SamplerConfig(**{"method": "norm"}) config.controls[0].variables[2].sampler = SamplerConfig(**{"method": "uniform"}) - workflow = _EverestWorkflow(config) - assert workflow is not None - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) point_names = ["x-0", "x-1", "x-2"] # Check resulting points - x0, x1, x2 = (workflow.result.controls["point_" + p] for p in point_names) + x0, x1, x2 = (run_model.result.controls["point_" + p] for p in point_names) assert x0 == pytest.approx(0.1, abs=0.025) assert x1 == pytest.approx(0.0, abs=0.025) assert x2 == pytest.approx(0.4, abs=0.025) # Check optimum value - assert pytest.approx(workflow.result.total_objective, abs=0.01) == -( + assert pytest.approx(run_model.result.total_objective, abs=0.01) == -( 0.25 * (1.6**2 + 1.5**2 + 0.1**2) + 0.75 * (0.4**2 + 0.5**2 + 0.1**2) ) # Expected distance is the weighted average of 
the (squared) distances @@ -65,6 +69,6 @@ def test_sampler_mixed(copy_math_func_test_data_to_tmp): dist_1 = (x0 - 0.5) ** 2 + (x1 - 0.5) ** 2 + (x2 - 0.5) ** 2 expected_opt = -(w[0] * (dist_0) + w[1] * (dist_1)) assert expected_opt == pytest.approx( - workflow.result.total_objective, + run_model.result.total_objective, abs=0.001, ) diff --git a/tests/everest/test_templating.py b/tests/everest/test_templating.py index 38bfeba0ec7..d444971d7b9 100644 --- a/tests/everest/test_templating.py +++ b/tests/everest/test_templating.py @@ -6,6 +6,7 @@ from ruamel.yaml import YAML import everest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig TMPL_CONFIG_FILE = "config.yml" @@ -125,10 +126,13 @@ def test_render_executable(copy_template_test_data_to_tmp): @pytest.mark.integration_test -def test_install_template(copy_template_test_data_to_tmp): +def test_install_template( + copy_template_test_data_to_tmp, evaluator_server_config_generator +): config = EverestConfig.load_file(TMPL_CONFIG_FILE) - workflow = everest.suite._EverestWorkflow(config) - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) def test_well_order_template(change_to_tmpdir): @@ -164,7 +168,9 @@ def test_well_order_template(change_to_tmpdir): @pytest.mark.integration_test -def test_user_specified_data_n_template(copy_math_func_test_data_to_tmp): +def test_user_specified_data_n_template( + copy_math_func_test_data_to_tmp, evaluator_server_config_generator +): """ Ensure that a user specifying a data resource and an installed_template with "extra_data", the results of that template will be passed to the @@ -212,10 +218,9 @@ def test_user_specified_data_n_template(copy_math_func_test_data_to_tmp): config = EverestConfig.with_defaults(**updated_config_dict) - workflow = everest.suite._EverestWorkflow(config) - assert workflow is not None - - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) # The data should have been loaded and passed through template to file. 
expected_file = os.path.join( diff --git a/tests/everest/test_workflows.py b/tests/everest/test_workflows.py index 7b2f6641c57..6e59ffcade1 100644 --- a/tests/everest/test_workflows.py +++ b/tests/everest/test_workflows.py @@ -3,8 +3,8 @@ import pytest +from ert.run_models.everest_run_model import EverestRunModel from everest.config import EverestConfig -from everest.suite import _EverestWorkflow from tests.everest.utils import relpath, skipif_no_everest_models CONFIG_DIR = relpath("test_data", "mocked_test_case") @@ -12,11 +12,12 @@ @pytest.mark.integration_test -def test_workflow_run(copy_mocked_test_data_to_tmp): +def test_workflow_run(copy_mocked_test_data_to_tmp, evaluator_server_config_generator): config = EverestConfig.load_file(CONFIG_FILE) - workflow = _EverestWorkflow(config) - workflow.start_optimization() + run_model = EverestRunModel.create(config) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) for name in ("pre_simulation", "post_simulation"): path = Path.cwd() / f"{name}.txt" @@ -31,12 +32,17 @@ def test_workflow_run(copy_mocked_test_data_to_tmp): @skipif_no_everest_models @pytest.mark.parametrize("config", ("array", "index")) def test_state_modifier_workflow_run( - config: str, copy_testdata_tmpdir: Callable[[Optional[str]], Path] + config: str, + copy_testdata_tmpdir: Callable[[Optional[str]], Path], + evaluator_server_config_generator, ) -> None: cwd = copy_testdata_tmpdir("open_shut_state_modifier") - _EverestWorkflow( - config=EverestConfig.load_file(f"everest/model/{config}.yml") - ).start_optimization() + + run_model = EverestRunModel.create( + EverestConfig.load_file(f"everest/model/{config}.yml") + ) + evaluator_server_config = evaluator_server_config_generator(run_model) + run_model.run_experiment(evaluator_server_config) for path in Path.cwd().glob("**/simulation_0/RESULT.SCH"): assert path.read_bytes() == (cwd / "eclipse/model/EXPECTED.SCH").read_bytes() diff --git a/tests/everest/test_yaml_parser.py b/tests/everest/test_yaml_parser.py index 44f0dd21ae8..1f99a15153c 100644 --- a/tests/everest/test_yaml_parser.py +++ b/tests/everest/test_yaml_parser.py @@ -4,7 +4,7 @@ import pytest from ruamel.yaml import YAML -import everest +from ert.run_models.everest_run_model import EverestRunModel from everest import ConfigKeys from everest.config import EverestConfig from everest.simulator.everest_to_ert import _everest_to_ert_config_dict @@ -17,11 +17,109 @@ def test_default_seed(copy_test_data_to_tmp, monkeypatch): monkeypatch.chdir("snake_oil") config_file = os.path.join("everest/model", "snake_oil_all.yml") + + with open(config_file, "w+", encoding="utf-8") as f: + f.write(""" +# A version of snake_oil.yml where all level one keys are added +definitions: + scratch: /tmp/everest/super/scratch + eclbase: model/SNAKE_OIL + +wells: + - {name: W1} + - {name: W2} + - {name: W3} + - {name: W4} + +controls: + - + name: group + type: well_control + min: 0 + max: 1 + variables: + - + name: W1 + initial_guess: 0 + - + name: W2 + initial_guess: 0 + - + name: W3 + initial_guess: 1 + - + name: W4 + initial_guess: 1 + - + name: super_scalars + type: generic_control + variables: + - + name: gravity + initial_guess: 9.81 + min: 0 + max: 1000 + +objective_functions: + - + name: snake_oil_nvp + +input_constraints: + - + target: 1.0 + weights: + group.W1: 1 + group.W2: 1 + group.W3: 1 + group.W4: 1 + +install_jobs: + - + name: snake_oil_diff + source: ../../jobs/SNAKE_OIL_DIFF + - + name: snake_oil_simulator + 
source: ../../jobs/SNAKE_OIL_SIMULATOR + - + name: snake_oil_npv + source: ../../jobs/SNAKE_OIL_NPV + +install_data: + - + source: ../../eclipse/include/grid/CASE.EGRID + target: MY_GRID.EGRID + - + source: ../../eclipse/model/SNAKE_OIL.DATA + target: SNAKE_OIL.DATA + +optimization: + algorithm: optpp_q_newton + +environment: + simulation_folder: r{{ scratch }}/simulations + +simulator: + queue_system: lsf + cores: 3 + name: mr + resubmit_limit: 17 + options: span = 1 && select[x86 and GNU/Linux] + +model: + realizations: [0, 1, 2] + +forward_model: + - snake_oil_simulator + - snake_oil_npv + - snake_oil_diff + +""") + config = EverestConfig.load_file(config_file) assert config.environment.random_seed is None - ever_workflow = everest.suite._EverestWorkflow(config) - config = ever_workflow.config + run_model = EverestRunModel.create(config) + config = run_model.everest_config random_seed = config.environment.random_seed assert isinstance(random_seed, int)
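
Note: taken together, these changes retire everest.suite.start_optimization in favor of the EverestRunModel entry point. Below is a minimal caller-side sketch, assuming only the APIs added or exercised in this diff (EverestRunModel.create, run_experiment, and the result/exit_code properties); the config path and the explicit port range for the local queue system mirror the updated tests and everserver.py and are illustrative rather than required:

    from ert.config import QueueSystem
    from ert.ensemble_evaluator import EvaluatorServerConfig
    from ert.run_models.everest_run_model import EverestRunModel
    from everest.config import EverestConfig

    # Creating the run model applies the random-seed default and creates the
    # log and optimization output directories.
    config = EverestConfig.load_file("config_minimal.yml")  # illustrative config file
    config.environment.random_seed = 42  # optional: pin the seed for reproducibility
    run_model = EverestRunModel.create(config)

    # Same choice as everserver.py: only constrain the port range for the local queue.
    evaluator_server_config = EvaluatorServerConfig(
        custom_port_range=range(49152, 51819)
        if run_model.ert_config.queue_config.queue_system == QueueSystem.LOCAL
        else None
    )

    run_model.run_experiment(evaluator_server_config)

    # The optimal result and exit code are now exposed as properties rather than
    # being returned by start_optimization.
    print(run_model.result.total_objective, run_model.exit_code)

This is the same pattern the new evaluator_server_config_generator fixture wraps for the tests; it is a sketch of the refactored flow, not a drop-in replacement for every previous start_optimization call site.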