From d2e7f056aa8971b991824027e20405ba5b1b00ed Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 14:32:12 -0700 Subject: [PATCH] Rename `max_iterations` to `max_suggestions` and track in Optimizer `.suggest()` instead of `.register()` (#713) makes optimizers and schedulers a bit simpler. Part of issue #715 Closes #711 Note: the move from `--max_iterations` to `--max_suggestions` is a breaking change, so we will need to cut a new release for this. --------- Co-authored-by: Brian Kroth --- mlos_bench/README.md | 6 +++-- .../config/cli/azure-redis-opt.jsonc | 2 +- .../mlos_bench/config/experiments/README.md | 2 +- .../optimizers/mlos_core_default_opt.jsonc | 2 +- .../config/optimizers/mlos_core_flaml.jsonc | 2 +- .../config/optimizers/mlos_core_smac.jsonc | 2 +- .../config/optimizers/mock_opt.jsonc | 2 +- .../schemas/optimizers/optimizer-schema.json | 4 ++-- .../mlos_bench/optimizers/base_optimizer.py | 23 +++++++++---------- .../optimizers/grid_search_optimizer.py | 11 ++++----- .../optimizers/mlos_core_optimizer.py | 9 ++++---- .../mlos_bench/optimizers/mock_optimizer.py | 11 ++++----- .../optimizers/one_shot_optimizer.py | 4 ---- .../mlos_bench/schedulers/base_scheduler.py | 18 +++++++++------ .../mlos_bench/schedulers/sync_scheduler.py | 9 ++++---- .../config/globals/global_test_config.jsonc | 4 ++-- ...id_search_opt_invalid_max_iterations.jsonc | 2 +- .../invalid/missing_opt_class_config.jsonc | 2 +- .../mlos_opt_smac_bad_probability.jsonc | 2 +- .../mlos_opt_smac_llamatune_extra.jsonc | 2 +- .../bad/unhandled/mock_opt_extra_outer.jsonc | 2 +- .../good/full/grid_search_opt_full.jsonc | 2 +- .../good/full/mlos_opt_flaml_null_full.jsonc | 2 +- .../full/mlos_opt_random_identity_full.jsonc | 2 +- .../full/mlos_opt_smac_llamatune_full.jsonc | 2 +- .../good/full/mlos_opt_smac_null_full.jsonc | 2 +- .../test-cases/good/full/mock_opt_full.jsonc | 2 +- .../good/full/one_shot_opt_full.jsonc | 2 +- .../mlos_opt_smac_llamatune_partial.jsonc | 2 +- 
.../tests/launcher_in_process_test.py | 2 +- .../tests/launcher_parse_args_test.py | 4 ++-- .../mlos_bench/tests/launcher_run_test.py | 3 +-- .../mlos_bench/tests/optimizers/conftest.py | 14 +++++------ .../optimizers/grid_search_optimizer_test.py | 4 ++-- .../tests/optimizers/llamatune_opt_test.py | 2 +- .../tests/optimizers/mlos_core_opt_df_test.py | 2 +- .../optimizers/mlos_core_opt_smac_test.py | 6 ++--- 37 files changed, 83 insertions(+), 91 deletions(-) diff --git a/mlos_bench/README.md b/mlos_bench/README.md index ca690194ed..2a18d6bbad 100644 --- a/mlos_bench/README.md +++ b/mlos_bench/README.md @@ -195,10 +195,12 @@ Searching for an optimal set of tunable parameters is very similar to running a All we have to do is specifying the [`Optimizer`](./mlos_bench/optimizers/) in the top-level configuration, like in our [`azure-redis-opt.jsonc`](./mlos_bench/config/cli/azure-redis-opt.jsonc) example. ```sh -mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_iterations 10 +mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_suggestions 10 --trial-config-repeat-count 3 ``` -Note that again we use the command line option `--max_iterations` to override the default value from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). +Note that again we use the command line option `--max_suggestions` to override the max. number of suggested configurations to trial from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). +We also use `--trial-config-repeat-count` to benchmark each suggested configuration 3 times. +That means, we will run 30 trials in total, 3 for each of the 10 suggested configurations. 
We don't have to specify the `"tunable_values"` for the optimization: the optimizer will suggest new values on each iteration and the framework will feed this data into the benchmarking environment. diff --git a/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc b/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc index 71245054ad..7d2c5c6cbe 100644 --- a/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc +++ b/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc @@ -3,7 +3,7 @@ // Licensed under the MIT License. // // Run: -// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_iterations 10 +// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_suggestions 10 { "config_path": [ "mlos_bench/mlos_bench/config", diff --git a/mlos_bench/mlos_bench/config/experiments/README.md b/mlos_bench/mlos_bench/config/experiments/README.md index f34634ff13..ae1094739c 100644 --- a/mlos_bench/mlos_bench/config/experiments/README.md +++ b/mlos_bench/mlos_bench/config/experiments/README.md @@ -92,7 +92,7 @@ will be pushed down to the `Optimizer` configuration, e.g., [`mlos_core_flaml.js > NOTE: it is perfectly ok to have several files with the experiment-specific parameters (say, one for Azure, another one for Storage, and so on) and either include them in the `"globals"` section of the CLI config, and/or specify them in the command line when running the experiment, e.g. > > ```bash -> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_iterations 10 +> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_suggestions 10 > ``` > > (Note several files after the `--globals` option). 
diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc index 7c16c8200a..f48d2f2e14 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc @@ -7,6 +7,6 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100 + "max_suggestions": 100 } } diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc index 4b9db3be77..47d92aff6e 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc @@ -7,7 +7,7 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100, + "max_suggestions": 100, "optimizer_type": "FLAML" } } diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc index 324e694324..3eb40c1146 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc @@ -7,7 +7,7 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100, + "max_suggestions": 100, "optimizer_type": "SMAC", "output_directory": null // Override to have a permanent output with SMAC history etc. 
} diff --git a/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc b/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc index 7d8b321b11..1d3dec27cb 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc @@ -6,7 +6,7 @@ "config": { "optimization_target": "score", - "max_iterations": 5, + "max_suggestions": 5, "seed": 42 } } diff --git a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json index 0fa39a6d2f..32db42f9e6 100644 --- a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json @@ -22,8 +22,8 @@ "enum": ["min", "max"], "example": "min" }, - "max_iterations": { - "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) iterations to run when we launch the app.", + "max_suggestions": { + "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) config suggestions to run when we launch the app, or no limit if 0 is provided. 
Note: configs may be repeated in more than one trial.", "type": "integer", "minimum": 0, "example": 100 diff --git a/mlos_bench/mlos_bench/optimizers/base_optimizer.py b/mlos_bench/mlos_bench/optimizers/base_optimizer.py index c156a57faf..2181869519 100644 --- a/mlos_bench/mlos_bench/optimizers/base_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/base_optimizer.py @@ -35,7 +35,7 @@ class Optimizer(metaclass=ABCMeta): # pylint: disable=too-many-instance-attr BASE_SUPPORTED_CONFIG_PROPS = { "optimization_target", "optimization_direction", - "max_iterations", + "max_suggestions", "seed", "start_with_defaults", } @@ -71,12 +71,12 @@ def __init__(self, experiment_id = self._global_config.get('experiment_id') self.experiment_id = str(experiment_id).strip() if experiment_id else None - self._iter = 1 + self._iter = 0 # If False, use the optimizer to suggest the initial configuration; # if True (default), use the already initialized values for the first iteration. self._start_with_defaults: bool = bool( strtobool(str(self._config.pop('start_with_defaults', True)))) - self._max_iter = int(self._config.pop('max_iterations', 100)) + self._max_iter = int(self._config.pop('max_suggestions', 100)) self._opt_target = str(self._config.pop('optimization_target', 'score')) self._opt_sign = {"min": 1, "max": -1}[self._config.pop('optimization_direction', 'min')] @@ -224,7 +224,7 @@ def supports_preload(self) -> bool: @abstractmethod def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: + status: Optional[Sequence[Status]] = None) -> bool: """ Pre-load the optimizer with the bulk data from previous experiments. @@ -236,16 +236,13 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float Benchmark results from experiments that correspond to `configs`. status : Optional[Sequence[float]] Status of the experiments that correspond to `configs`. 
- is_warm_up : bool - True for the initial load, False for subsequent calls. Returns ------- is_not_empty : bool True if there is data to register, false otherwise. """ - _LOG.info("%s the optimizer with: %d configs, %d scores, %d status values", - "Warm-up" if is_warm_up else "Load", + _LOG.info("Update the optimizer with: %d configs, %d scores, %d status values", len(configs or []), len(scores or []), len(status or [])) if len(configs or []) != len(scores or []): raise ValueError("Numbers of configs and scores do not match.") @@ -257,10 +254,11 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float self._start_with_defaults = False return has_data - @abstractmethod def suggest(self) -> TunableGroups: """ Generate the next suggestion. + Base class' implementation increments the iteration count + and returns the current values of the tunables. Returns ------- @@ -269,13 +267,15 @@ def suggest(self) -> TunableGroups: These are the same tunables we pass to the constructor, but with the values set to the next suggestion. """ + self._iter += 1 + _LOG.debug("Iteration %d :: Suggest", self._iter) + return self._tunables.copy() @abstractmethod def register(self, tunables: TunableGroups, status: Status, score: Optional[Union[float, Dict[str, float]]] = None) -> Optional[float]: """ Register the observation for the given configuration. - Base class' implementations logs and increments the iteration count. Parameters ---------- @@ -295,7 +295,6 @@ def register(self, tunables: TunableGroups, status: Status, """ _LOG.info("Iteration %d :: Register: %s = %s score: %s", self._iter, tunables, status, score) - self._iter += 1 if status.is_succeeded() == (score is None): # XOR raise ValueError("Status and score must be consistent.") return self._get_score(status, score) @@ -336,7 +335,7 @@ def not_converged(self) -> bool: Return True if not converged, False otherwise. Base implementation just checks the iteration count. 
""" - return self._iter <= self._max_iter + return self._iter < self._max_iter @abstractmethod def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]: diff --git a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py index ac11758a54..72385d7504 100644 --- a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py @@ -109,27 +109,24 @@ def suggested_configs(self) -> Iterable[Dict[str, TunableValue]]: return (dict(zip(self._config_keys, config)) for config in self._suggested_configs) def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False if status is None: status = [Status.SUCCEEDED] * len(configs) for (params, score, trial_status) in zip(configs, scores, status): tunables = self._tunables.copy().assign(params) self.register(tunables, trial_status, nullable(float, score)) - if is_warm_up: - # Do not advance the iteration counter during warm-up. - self._iter -= 1 if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() - _LOG.debug("%s end: %s = %s", "Warm-up" if is_warm_up else "Update", self.target, score) + _LOG.debug("Update end: %s = %s", self.target, score) return True def suggest(self) -> TunableGroups: """ Generate the next grid search suggestion. 
""" - tunables = self._tunables.copy() + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") self._start_with_defaults = False diff --git a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py index 141da6dbc5..dfc51d4432 100644 --- a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py @@ -91,8 +91,8 @@ def name(self) -> str: return f"{self.__class__.__name__}:{self._opt.__class__.__name__}" def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False df_configs = self._to_df(configs) # Impute missing values, if necessary df_scores = pd.Series(scores, dtype=float) * self._opt_sign @@ -103,8 +103,6 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float df_configs = df_configs[df_status_completed] df_scores = df_scores[df_status_completed] self._opt.register(df_configs, df_scores) - if not is_warm_up: - self._iter += len(df_scores) if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() _LOG.debug("Warm-up end: %s = %s", self.target, score) @@ -154,12 +152,13 @@ def _to_df(self, configs: Sequence[Dict[str, TunableValue]]) -> pd.DataFrame: return df_configs def suggest(self) -> TunableGroups: + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") df_config = self._opt.suggest(defaults=self._start_with_defaults) self._start_with_defaults = False _LOG.info("Iteration %d :: Suggest:\n%s", self._iter, df_config) - return self._tunables.copy().assign( + return tunables.assign( 
configspace_data_to_tunable_values(df_config.loc[0].to_dict())) def register(self, tunables: TunableGroups, status: Status, diff --git a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py index 563414e019..1f73877b1b 100644 --- a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py @@ -41,27 +41,24 @@ def __init__(self, } def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False if status is None: status = [Status.SUCCEEDED] * len(configs) for (params, score, trial_status) in zip(configs, scores, status): tunables = self._tunables.copy().assign(params) self.register(tunables, trial_status, nullable(float, score)) - if is_warm_up: - # Do not advance the iteration counter during warm-up. - self._iter -= 1 if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() - _LOG.debug("Warm-up end: %s = %s", self.target, score) + _LOG.debug("Bulk register end: %s = %s", self.target, score) return True def suggest(self) -> TunableGroups: """ Generate the next (random) suggestion. 
""" - tunables = self._tunables.copy() + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") self._start_with_defaults = False diff --git a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py index 9929d5d181..088ed03bdf 100644 --- a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py @@ -36,7 +36,3 @@ def __init__(self, @property def supports_preload(self) -> bool: return False - - def suggest(self) -> TunableGroups: - _LOG.info("Suggest: %s", self._tunables) - return self._tunables.copy() diff --git a/mlos_bench/mlos_bench/schedulers/base_scheduler.py b/mlos_bench/mlos_bench/schedulers/base_scheduler.py index f623161327..77e7ccbc95 100644 --- a/mlos_bench/mlos_bench/schedulers/base_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/base_scheduler.py @@ -79,6 +79,7 @@ def __init__(self, *, self.optimizer = optimizer self.storage = storage self._root_env_config = root_env_config + self._last_trial_id = -1 _LOG.debug("Scheduler instantiated: %s :: %s", self, config) @@ -179,21 +180,24 @@ def load_config(self, config_id: int) -> TunableGroups: _LOG.debug("Config %d ::\n%s", config_id, json.dumps(tunable_values, indent=2)) return tunables - def _get_optimizer_suggestions(self, last_trial_id: int = -1, is_warm_up: bool = False) -> int: + def _schedule_new_optimizer_suggestions(self) -> bool: """ Optimizer part of the loop. Load the results of the executed trials into the optimizer, suggest new configurations, and add them to the queue. - Return the last trial ID processed by the optimizer. + Return True if optimization is not over, False otherwise. 
""" assert self.experiment is not None - (trial_ids, configs, scores, status) = self.experiment.load(last_trial_id) + (trial_ids, configs, scores, status) = self.experiment.load(self._last_trial_id) _LOG.info("QUEUE: Update the optimizer with trial results: %s", trial_ids) - self.optimizer.bulk_register(configs, scores, status, is_warm_up) + self.optimizer.bulk_register(configs, scores, status) + self._last_trial_id = max(trial_ids, default=self._last_trial_id) - tunables = self.optimizer.suggest() - self.schedule_trial(tunables) + not_converged = self.optimizer.not_converged() + if not_converged: + tunables = self.optimizer.suggest() + self.schedule_trial(tunables) - return max(trial_ids, default=last_trial_id) + return not_converged def schedule_trial(self, tunables: TunableGroups) -> None: """ diff --git a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py index 7aa263ce37..557e64ffa4 100644 --- a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py @@ -29,16 +29,15 @@ def start(self) -> None: """ super().start() - last_trial_id = -1 is_warm_up = self.optimizer.supports_preload if not is_warm_up: _LOG.warning("Skip pending trials and warm-up: %s", self.optimizer) - while self.optimizer.not_converged(): - _LOG.info("Optimization loop: %s Last trial ID: %d", - "Warm-up" if is_warm_up else "Run", last_trial_id) + not_converged = True + while not_converged: + _LOG.info("Optimization loop: Last trial ID: %d", self._last_trial_id) self._run_schedule(is_warm_up) - last_trial_id = self._get_optimizer_suggestions(last_trial_id, is_warm_up) + not_converged = self._schedule_new_optimizer_suggestions() is_warm_up = False def run_trial(self, trial: Storage.Trial) -> None: diff --git a/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc b/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc index 51bc38979c..e010298fff 100644 --- 
a/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc +++ b/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc @@ -22,7 +22,7 @@ "pathVarWithEnvVarRef": "$CUSTOM_PATH_FROM_ENV/foo", "varWithEnvVarRef": "user:$USER", - // Override the default value of the "max_iterations" parameter + // Override the default value of the "max_suggestions" parameter // of the optimizer when running local tests: - "max_iterations": 5 + "max_suggestions": 5 } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc index 7d72c13a40..934fe9966b 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc @@ -1,6 +1,6 @@ { "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", "config": { - "max_iterations": null, + "max_suggestions": null, } } \ No newline at end of file diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc index 7ee27e4562..231f50034c 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc @@ -2,7 +2,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc 
b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc index 3b27ac1b4a..9498c5ae20 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc @@ -3,7 +3,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc index c0a81e3a5a..6891319e94 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc @@ -3,7 +3,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc index 1c07aee853..5b1259232f 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc @@ -6,7 +6,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, 
"start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc index faac27e03b..89a782a973 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc @@ -5,7 +5,7 @@ "some/path/to/tunables.jsonc" ], "config": { - "max_iterations": 100, + "max_suggestions": 100, "optimization_direction": "max", "optimization_target": "score", "seed": 12345, diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc index cd493c0c79..0be5f47165 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 10, + "max_suggestions": 10, "seed": 12345, "start_with_defaults": false, "optimizer_type": "FLAML", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc index 06d4570bee..01f13fc8a7 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "RANDOM", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc index 96c1e4afa1..24fdbda7aa 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc index 1518004aa5..dd4a93c2cb 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc @@ -3,7 +3,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc index 49546527a0..3dfd052fc1 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc index 3e6df46ecb..3b5d2f34a4 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc @@ -11,7 +11,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc index 1a2cdc4811..1159d982ad 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc @@ -4,7 +4,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "optimizer_type": "SMAC", "space_adapter_type": "LLAMATUNE" diff --git a/mlos_bench/mlos_bench/tests/launcher_in_process_test.py b/mlos_bench/mlos_bench/tests/launcher_in_process_test.py index 5f7ac7f437..5954aed802 100644 --- a/mlos_bench/mlos_bench/tests/launcher_in_process_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_in_process_test.py @@ -21,7 +21,7 @@ ([ "--config", "mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc", "--trial_config_repeat_count", "3", - "--max_iterations", 
"3", + "--max_suggestions", "3", ], 64.53897), ] ) diff --git a/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py b/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py index 8db8dab478..bddee5729e 100644 --- a/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py @@ -171,10 +171,10 @@ def test_launcher_args_parse_2(config_paths: List[str]) -> None: globals_file_config = launcher.config_loader.load_config(globals_file, ConfigSchema.GLOBALS) # The actual global_config gets overwritten as a part of processing, so to test # this we read the original value out of the source files. - orig_max_iters = globals_file_config.get('max_iterations', opt_config.get('config', {}).get('max_iterations', 100)) + orig_max_iters = globals_file_config.get('max_suggestions', opt_config.get('config', {}).get('max_suggestions', 100)) assert launcher.optimizer.max_iterations \ == orig_max_iters \ - == launcher.global_config['max_iterations'] + == launcher.global_config['max_suggestions'] # Check that the optimizer got initialized with random values instead of the defaults. 
# Note: the environment doesn't get updated until suggest() is called to diff --git a/mlos_bench/mlos_bench/tests/launcher_run_test.py b/mlos_bench/mlos_bench/tests/launcher_run_test.py index b088068a6d..85f2235da4 100644 --- a/mlos_bench/mlos_bench/tests/launcher_run_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_run_test.py @@ -93,8 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic """ _launch_main_app( root_path, local_exec_service, - # TODO: Reset --max_iterations to 3 after fixing the optimizer - "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 9", + "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_suggestions 3", [ # Iteration 1: Expect first value to be the baseline f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + diff --git a/mlos_bench/mlos_bench/tests/optimizers/conftest.py b/mlos_bench/mlos_bench/tests/optimizers/conftest.py index 468edc42a3..907a2d98a5 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/conftest.py +++ b/mlos_bench/mlos_bench/tests/optimizers/conftest.py @@ -61,7 +61,7 @@ def mock_opt_no_defaults(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 5, + "max_suggestions": 5, "start_with_defaults": False, "seed": SEED }, @@ -79,7 +79,7 @@ def mock_opt(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 5, + "max_suggestions": 5, "seed": SEED }, ) @@ -96,7 +96,7 @@ def mock_opt_max(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": 10, + "max_suggestions": 10, "seed": SEED }, ) @@ -113,7 +113,7 @@ def flaml_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", 
"optimization_direction": "min", - "max_iterations": 15, + "max_suggestions": 15, "optimizer_type": "FLAML", "seed": SEED, }, @@ -131,7 +131,7 @@ def flaml_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": 15, + "max_suggestions": 15, "optimizer_type": "FLAML", "seed": SEED, }, @@ -157,7 +157,7 @@ def smac_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": SMAC_ITERATIONS, + "max_suggestions": SMAC_ITERATIONS, "optimizer_type": "SMAC", "seed": SEED, "output_directory": None, @@ -179,7 +179,7 @@ def smac_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": SMAC_ITERATIONS, + "max_suggestions": SMAC_ITERATIONS, "optimizer_type": "SMAC", "seed": SEED, "output_directory": None, diff --git a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py index c1d714ba40..878e6f6c0d 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py @@ -82,7 +82,7 @@ def grid_search_opt(grid_search_tunables: TunableGroups, # multiple of the number of elements in the grid. 
max_iterations = len(grid_search_tunables_grid) * 2 - 3 return GridSearchOptimizer(tunables=grid_search_tunables, config={ - "max_iterations": max_iterations, + "max_suggestions": max_iterations, "optimization_direction": "max", }) @@ -161,7 +161,7 @@ def test_grid_search(grid_search_opt: GridSearchOptimizer, assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs) assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid) - # FIXME: Should we consider not_converged as the "max_iterations", an empty grid, or both? + # We consider not_converged as either having reached "max_suggestions" or an empty grid. # Try to empty the rest of the grid. while grid_search_opt.not_converged(): diff --git a/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py b/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py index 39735ae9d1..da2acfb636 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py @@ -31,7 +31,7 @@ def llamatune_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: "num_low_dims": 2, }, "optimization_target": "score", - "max_iterations": 10, + "max_suggestions": 10, "optimizer_type": "SMAC", "seed": SEED, # "start_with_defaults": False, diff --git a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py index 4f716083ca..f36e3c149c 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py @@ -26,7 +26,7 @@ def mlos_core_optimizer(tunable_groups: TunableGroups) -> MlosCoreOptimizer: """ test_opt_config = { 'optimizer_type': 'FLAML', - 'max_iterations': 10, + 'max_suggestions': 10, 'seed': SEED, } return MlosCoreOptimizer(tunable_groups, test_opt_config) diff --git 
a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py index 779c946625..b10571095b 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py @@ -30,7 +30,7 @@ def test_init_mlos_core_smac_opt_bad_trial_count(tunable_groups: TunableGroups) test_opt_config = { 'optimizer_type': 'SMAC', 'max_trials': 10, - 'max_iterations': 11, + 'max_suggestions': 11, 'seed': SEED, } with pytest.raises(AssertionError): @@ -44,13 +44,13 @@ def test_init_mlos_core_smac_opt_max_trials(tunable_groups: TunableGroups) -> No """ test_opt_config = { 'optimizer_type': 'SMAC', - 'max_iterations': 123, + 'max_suggestions': 123, 'seed': SEED, } opt = MlosCoreOptimizer(tunable_groups, test_opt_config) # pylint: disable=protected-access assert isinstance(opt._opt, SmacOptimizer) - assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_iterations'] + assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_suggestions'] def test_init_mlos_core_smac_absolute_output_directory(tunable_groups: TunableGroups) -> None: