From f5f224ec413c01af7939457efcac2d7904773829 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Wed, 13 Mar 2024 14:29:05 -0700 Subject: [PATCH 01/11] increment the interation counter in suggest() instead of register() and remove the is_warm_up flag --- mlos_bench/mlos_bench/optimizers/base_optimizer.py | 13 ++++++------- .../mlos_bench/optimizers/grid_search_optimizer.py | 11 ++++------- .../mlos_bench/optimizers/mlos_core_optimizer.py | 9 ++++----- mlos_bench/mlos_bench/optimizers/mock_optimizer.py | 11 ++++------- .../mlos_bench/optimizers/one_shot_optimizer.py | 4 ---- mlos_bench/mlos_bench/schedulers/base_scheduler.py | 4 ++-- mlos_bench/mlos_bench/schedulers/sync_scheduler.py | 2 +- 7 files changed, 21 insertions(+), 33 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/base_optimizer.py b/mlos_bench/mlos_bench/optimizers/base_optimizer.py index c156a57faf6..ca10ef3ece0 100644 --- a/mlos_bench/mlos_bench/optimizers/base_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/base_optimizer.py @@ -224,7 +224,7 @@ def supports_preload(self) -> bool: @abstractmethod def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: + status: Optional[Sequence[Status]] = None) -> bool: """ Pre-load the optimizer with the bulk data from previous experiments. @@ -236,16 +236,13 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float Benchmark results from experiments that correspond to `configs`. status : Optional[Sequence[float]] Status of the experiments that correspond to `configs`. - is_warm_up : bool - True for the initial load, False for subsequent calls. Returns ------- is_not_empty : bool True if there is data to register, false otherwise. 
""" - _LOG.info("%s the optimizer with: %d configs, %d scores, %d status values", - "Warm-up" if is_warm_up else "Load", + _LOG.info("Update the optimizer with: %d configs, %d scores, %d status values", len(configs or []), len(scores or []), len(status or [])) if len(configs or []) != len(scores or []): raise ValueError("Numbers of configs and scores do not match.") @@ -261,6 +258,7 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float def suggest(self) -> TunableGroups: """ Generate the next suggestion. + Base class' implementation increments the iteration count. Returns ------- @@ -269,13 +267,15 @@ def suggest(self) -> TunableGroups: These are the same tunables we pass to the constructor, but with the values set to the next suggestion. """ + _LOG.debug("Iteration %d :: Suggest", self._iter) + self._iter += 1 + return self._tunables.copy() @abstractmethod def register(self, tunables: TunableGroups, status: Status, score: Optional[Union[float, Dict[str, float]]] = None) -> Optional[float]: """ Register the observation for the given configuration. - Base class' implementations logs and increments the iteration count. 
Parameters ---------- @@ -295,7 +295,6 @@ def register(self, tunables: TunableGroups, status: Status, """ _LOG.info("Iteration %d :: Register: %s = %s score: %s", self._iter, tunables, status, score) - self._iter += 1 if status.is_succeeded() == (score is None): # XOR raise ValueError("Status and score must be consistent.") return self._get_score(status, score) diff --git a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py index dabee0ad156..8d65013146e 100644 --- a/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py @@ -108,27 +108,24 @@ def suggested_configs(self) -> Iterable[Dict[str, TunableValue]]: return (dict(zip(self._config_keys, config)) for config in self._suggested_configs) def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False if status is None: status = [Status.SUCCEEDED] * len(configs) for (params, score, trial_status) in zip(configs, scores, status): tunables = self._tunables.copy().assign(params) self.register(tunables, trial_status, None if score is None else float(score)) - if is_warm_up: - # Do not advance the iteration counter during warm-up. - self._iter -= 1 if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() - _LOG.debug("%s end: %s = %s", "Warm-up" if is_warm_up else "Update", self.target, score) + _LOG.debug("Update end: %s = %s", self.target, score) return True def suggest(self) -> TunableGroups: """ Generate the next grid search suggestion. 
""" - tunables = self._tunables.copy() + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") self._start_with_defaults = False diff --git a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py index 141da6dbc57..dfc51d44321 100644 --- a/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py @@ -91,8 +91,8 @@ def name(self) -> str: return f"{self.__class__.__name__}:{self._opt.__class__.__name__}" def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False df_configs = self._to_df(configs) # Impute missing values, if necessary df_scores = pd.Series(scores, dtype=float) * self._opt_sign @@ -103,8 +103,6 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float df_configs = df_configs[df_status_completed] df_scores = df_scores[df_status_completed] self._opt.register(df_configs, df_scores) - if not is_warm_up: - self._iter += len(df_scores) if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() _LOG.debug("Warm-up end: %s = %s", self.target, score) @@ -154,12 +152,13 @@ def _to_df(self, configs: Sequence[Dict[str, TunableValue]]) -> pd.DataFrame: return df_configs def suggest(self) -> TunableGroups: + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") df_config = self._opt.suggest(defaults=self._start_with_defaults) self._start_with_defaults = False _LOG.info("Iteration %d :: Suggest:\n%s", self._iter, df_config) - return self._tunables.copy().assign( + return tunables.assign( 
configspace_data_to_tunable_values(df_config.loc[0].to_dict())) def register(self, tunables: TunableGroups, status: Status, diff --git a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py index f2ab4b63030..79f29c3a7dc 100644 --- a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py @@ -40,17 +40,14 @@ def __init__(self, } def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float]], - status: Optional[Sequence[Status]] = None, is_warm_up: bool = False) -> bool: - if not super().bulk_register(configs, scores, status, is_warm_up): + status: Optional[Sequence[Status]] = None) -> bool: + if not super().bulk_register(configs, scores, status): return False if status is None: status = [Status.SUCCEEDED] * len(configs) for (params, score, trial_status) in zip(configs, scores, status): tunables = self._tunables.copy().assign(params) self.register(tunables, trial_status, None if score is None else float(score)) - if is_warm_up: - # Do not advance the iteration counter during warm-up. - self._iter -= 1 if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() _LOG.debug("Warm-up end: %s = %s", self.target, score) @@ -60,12 +57,12 @@ def suggest(self) -> TunableGroups: """ Generate the next (random) suggestion. 
""" - tunables = self._tunables.copy() + tunables = super().suggest() if self._start_with_defaults: _LOG.info("Use default values for the first trial") self._start_with_defaults = False else: for (tunable, _group) in tunables: tunable.value = self._random[tunable.type](tunable) - _LOG.info("Iteration %d :: Suggest: %s", self._iter, tunables) + _LOG.info("Iteration %d :: Suggest: %s", self._iter - 1, tunables) return tunables diff --git a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py index 9929d5d1813..088ed03bdf0 100644 --- a/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py @@ -36,7 +36,3 @@ def __init__(self, @property def supports_preload(self) -> bool: return False - - def suggest(self) -> TunableGroups: - _LOG.info("Suggest: %s", self._tunables) - return self._tunables.copy() diff --git a/mlos_bench/mlos_bench/schedulers/base_scheduler.py b/mlos_bench/mlos_bench/schedulers/base_scheduler.py index c92ae22896a..4e7a797b996 100644 --- a/mlos_bench/mlos_bench/schedulers/base_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/base_scheduler.py @@ -144,7 +144,7 @@ def load_config(self, config_id: int) -> TunableGroups: _LOG.debug("Config %d ::\n%s", config_id, json.dumps(tunable_values, indent=2)) return tunables - def _get_optimizer_suggestions(self, last_trial_id: int = -1, is_warm_up: bool = False) -> int: + def _get_optimizer_suggestions(self, last_trial_id: int = -1) -> int: """ Optimizer part of the loop. Load the results of the executed trials into the optimizer, suggest new configurations, and add them to the queue. 
@@ -153,7 +153,7 @@ def _get_optimizer_suggestions(self, last_trial_id: int = -1, is_warm_up: bool = assert self.experiment is not None (trial_ids, configs, scores, status) = self.experiment.load(last_trial_id) _LOG.info("QUEUE: Update the optimizer with trial results: %s", trial_ids) - self.optimizer.bulk_register(configs, scores, status, is_warm_up) + self.optimizer.bulk_register(configs, scores, status) tunables = self.optimizer.suggest() self.schedule_trial(tunables) diff --git a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py index baa058bd57a..9d8f16b0a4a 100644 --- a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py @@ -36,7 +36,7 @@ def start(self) -> None: _LOG.info("Optimization loop: %s Last trial ID: %d", "Warm-up" if is_warm_up else "Run", last_trial_id) self._run_schedule(is_warm_up) - last_trial_id = self._get_optimizer_suggestions(last_trial_id, is_warm_up) + last_trial_id = self._get_optimizer_suggestions(last_trial_id) is_warm_up = False def run_trial(self, trial: Storage.Trial) -> None: From 320dd5385e92fd7d0c0ab59f78d785c221635b58 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Wed, 13 Mar 2024 15:59:23 -0700 Subject: [PATCH 02/11] explicit warm-up run in the scheduler; minor fixes to logging --- mlos_bench/mlos_bench/optimizers/base_optimizer.py | 4 ++-- mlos_bench/mlos_bench/optimizers/mock_optimizer.py | 4 ++-- mlos_bench/mlos_bench/schedulers/sync_scheduler.py | 13 +++++++------ 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/base_optimizer.py b/mlos_bench/mlos_bench/optimizers/base_optimizer.py index ca10ef3ece0..65ed98fb077 100644 --- a/mlos_bench/mlos_bench/optimizers/base_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/base_optimizer.py @@ -71,7 +71,7 @@ def __init__(self, experiment_id = self._global_config.get('experiment_id') self.experiment_id = 
str(experiment_id).strip() if experiment_id else None - self._iter = 1 + self._iter = 0 # If False, use the optimizer to suggest the initial configuration; # if True (default), use the already initialized values for the first iteration. self._start_with_defaults: bool = bool( @@ -267,8 +267,8 @@ def suggest(self) -> TunableGroups: These are the same tunables we pass to the constructor, but with the values set to the next suggestion. """ - _LOG.debug("Iteration %d :: Suggest", self._iter) self._iter += 1 + _LOG.debug("Iteration %d :: Suggest", self._iter) return self._tunables.copy() @abstractmethod diff --git a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py index 79f29c3a7dc..d26ae26f235 100644 --- a/mlos_bench/mlos_bench/optimizers/mock_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/mock_optimizer.py @@ -50,7 +50,7 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float self.register(tunables, trial_status, None if score is None else float(score)) if _LOG.isEnabledFor(logging.DEBUG): (score, _) = self.get_best_observation() - _LOG.debug("Warm-up end: %s = %s", self.target, score) + _LOG.debug("Bulk register end: %s = %s", self.target, score) return True def suggest(self) -> TunableGroups: @@ -64,5 +64,5 @@ def suggest(self) -> TunableGroups: else: for (tunable, _group) in tunables: tunable.value = self._random[tunable.type](tunable) - _LOG.info("Iteration %d :: Suggest: %s", self._iter - 1, tunables) + _LOG.info("Iteration %d :: Suggest: %s", self._iter, tunables) return tunables diff --git a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py index 9d8f16b0a4a..d23ea96a8e8 100644 --- a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py @@ -28,16 +28,17 @@ def start(self) -> None: super().start() last_trial_id = -1 - is_warm_up = self.optimizer.supports_preload - if 
not is_warm_up: + if self.optimizer.supports_preload: + # Finish pending/running trials before warming up the optimizer. + _LOG.info("Run pending trials") + self._run_schedule(running=True) + else: _LOG.warning("Skip pending trials and warm-up: %s", self.optimizer) while self.optimizer.not_converged(): - _LOG.info("Optimization loop: %s Last trial ID: %d", - "Warm-up" if is_warm_up else "Run", last_trial_id) - self._run_schedule(is_warm_up) + _LOG.info("Optimization loop: Last trial ID: %d", last_trial_id) last_trial_id = self._get_optimizer_suggestions(last_trial_id) - is_warm_up = False + self._run_schedule() def run_trial(self, trial: Storage.Trial) -> None: """ From 8e15ac5a3a9a129f64f326c8637760671e04cd5e Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Fri, 15 Mar 2024 14:49:59 -0700 Subject: [PATCH 03/11] bugfix: check the itneration count in .not_converged() --- mlos_bench/mlos_bench/optimizers/base_optimizer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/base_optimizer.py b/mlos_bench/mlos_bench/optimizers/base_optimizer.py index 65ed98fb077..ea5e904f132 100644 --- a/mlos_bench/mlos_bench/optimizers/base_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/base_optimizer.py @@ -254,11 +254,11 @@ def bulk_register(self, configs: Sequence[dict], scores: Sequence[Optional[float self._start_with_defaults = False return has_data - @abstractmethod def suggest(self) -> TunableGroups: """ Generate the next suggestion. - Base class' implementation increments the iteration count. + Base class' implementation increments the iteration count + and returns the current values of the tunables. Returns ------- @@ -335,7 +335,7 @@ def not_converged(self) -> bool: Return True if not converged, False otherwise. Base implementation just checks the iteration count. 
""" - return self._iter <= self._max_iter + return self._iter < self._max_iter @abstractmethod def get_best_observation(self) -> Union[Tuple[float, TunableGroups], Tuple[None, None]]: From 5c21fcccfc55a17321b1bfb1eea1865e94bbb32f Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Fri, 15 Mar 2024 16:16:20 -0700 Subject: [PATCH 04/11] all unit tests pass --- .../mlos_bench/schedulers/base_scheduler.py | 16 ++++++++++------ .../mlos_bench/schedulers/sync_scheduler.py | 18 ++++++++---------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/mlos_bench/mlos_bench/schedulers/base_scheduler.py b/mlos_bench/mlos_bench/schedulers/base_scheduler.py index 6b6a5ede0c9..5d0dba731d4 100644 --- a/mlos_bench/mlos_bench/schedulers/base_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/base_scheduler.py @@ -77,6 +77,7 @@ def __init__(self, *, self.optimizer = optimizer self.storage = storage self._root_env_config = root_env_config + self._last_trial_id = -1 _LOG.debug("Scheduler instantiated: %s :: %s", self, config) @@ -177,21 +178,24 @@ def load_config(self, config_id: int) -> TunableGroups: _LOG.debug("Config %d ::\n%s", config_id, json.dumps(tunable_values, indent=2)) return tunables - def _get_optimizer_suggestions(self, last_trial_id: int = -1) -> int: + def _get_optimizer_suggestions(self) -> bool: """ Optimizer part of the loop. Load the results of the executed trials into the optimizer, suggest new configurations, and add them to the queue. - Return the last trial ID processed by the optimizer. + Return True if optimization is not over, False otherwise. 
""" assert self.experiment is not None - (trial_ids, configs, scores, status) = self.experiment.load(last_trial_id) + (trial_ids, configs, scores, status) = self.experiment.load(self._last_trial_id) _LOG.info("QUEUE: Update the optimizer with trial results: %s", trial_ids) self.optimizer.bulk_register(configs, scores, status) + self._last_trial_id = max(trial_ids, default=self._last_trial_id) - tunables = self.optimizer.suggest() - self.schedule_trial(tunables) + not_converged = self.optimizer.not_converged() + if not_converged: + tunables = self.optimizer.suggest() + self.schedule_trial(tunables) - return max(trial_ids, default=last_trial_id) + return not_converged def schedule_trial(self, tunables: TunableGroups) -> None: """ diff --git a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py index d23ea96a8e8..91d21e996e2 100644 --- a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py @@ -27,18 +27,16 @@ def start(self) -> None: """ super().start() - last_trial_id = -1 - if self.optimizer.supports_preload: - # Finish pending/running trials before warming up the optimizer. 
- _LOG.info("Run pending trials") - self._run_schedule(running=True) - else: + is_warm_up = self.optimizer.supports_preload + if not is_warm_up: _LOG.warning("Skip pending trials and warm-up: %s", self.optimizer) - while self.optimizer.not_converged(): - _LOG.info("Optimization loop: Last trial ID: %d", last_trial_id) - last_trial_id = self._get_optimizer_suggestions(last_trial_id) - self._run_schedule() + not_converged = True + while not_converged: + _LOG.info("Optimization loop: Last trial ID: %d", self._last_trial_id) + self._run_schedule(is_warm_up) + not_converged = self._get_optimizer_suggestions() + is_warm_up = False def run_trial(self, trial: Storage.Trial) -> None: """ From 385ba842075f2f34db3ebe5e5b78bba2b27f1ea2 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Fri, 15 Mar 2024 16:32:12 -0700 Subject: [PATCH 05/11] fix max_iterations command-line option --- mlos_bench/mlos_bench/tests/launcher_run_test.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/launcher_run_test.py b/mlos_bench/mlos_bench/tests/launcher_run_test.py index b088068a6d3..965be6ce5ae 100644 --- a/mlos_bench/mlos_bench/tests/launcher_run_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_run_test.py @@ -93,8 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic """ _launch_main_app( root_path, local_exec_service, - # TODO: Reset --max_iterations to 3 after fixing the optimizer - "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 9", + "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 3", [ # Iteration 1: Expect first value to be the baseline f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + From c50ff1986f4550da6af24ef4e912bb623cf52400 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Mon, 18 Mar 2024 15:29:46 -0700 Subject: [PATCH 06/11] use --max_suggestions instead of 
--max_iterations in all configs --- mlos_bench/README.md | 4 ++-- .../mlos_bench/config/cli/azure-redis-opt.jsonc | 2 +- mlos_bench/mlos_bench/config/experiments/README.md | 2 +- .../config/optimizers/mlos_core_default_opt.jsonc | 2 +- .../config/optimizers/mlos_core_flaml.jsonc | 2 +- .../config/optimizers/mlos_core_smac.jsonc | 2 +- .../mlos_bench/config/optimizers/mock_opt.jsonc | 2 +- .../schemas/optimizers/optimizer-schema.json | 4 ++-- mlos_bench/mlos_bench/optimizers/base_optimizer.py | 4 ++-- .../tests/config/globals/global_test_config.jsonc | 4 ++-- .../grid_search_opt_invalid_max_iterations.jsonc | 2 +- .../bad/invalid/missing_opt_class_config.jsonc | 2 +- .../invalid/mlos_opt_smac_bad_probability.jsonc | 2 +- .../unhandled/mlos_opt_smac_llamatune_extra.jsonc | 2 +- .../bad/unhandled/mock_opt_extra_outer.jsonc | 2 +- .../good/full/grid_search_opt_full.jsonc | 2 +- .../good/full/mlos_opt_flaml_null_full.jsonc | 2 +- .../good/full/mlos_opt_random_identity_full.jsonc | 2 +- .../good/full/mlos_opt_smac_llamatune_full.jsonc | 2 +- .../good/full/mlos_opt_smac_null_full.jsonc | 2 +- .../test-cases/good/full/mock_opt_full.jsonc | 2 +- .../test-cases/good/full/one_shot_opt_full.jsonc | 2 +- .../partial/mlos_opt_smac_llamatune_partial.jsonc | 2 +- .../mlos_bench/tests/launcher_in_process_test.py | 2 +- .../mlos_bench/tests/launcher_parse_args_test.py | 4 ++-- mlos_bench/mlos_bench/tests/launcher_run_test.py | 2 +- mlos_bench/mlos_bench/tests/optimizers/conftest.py | 14 +++++++------- .../tests/optimizers/grid_search_optimizer_test.py | 4 ++-- .../tests/optimizers/llamatune_opt_test.py | 2 +- .../tests/optimizers/mlos_core_opt_df_test.py | 2 +- .../tests/optimizers/mlos_core_opt_smac_test.py | 6 +++--- 31 files changed, 45 insertions(+), 45 deletions(-) diff --git a/mlos_bench/README.md b/mlos_bench/README.md index ca690194ed5..16b9388b708 100644 --- a/mlos_bench/README.md +++ b/mlos_bench/README.md @@ -195,10 +195,10 @@ Searching for an optimal set of tunable 
parameters is very similar to running a All we have to do is specifying the [`Optimizer`](./mlos_bench/optimizers/) in the top-level configuration, like in our [`azure-redis-opt.jsonc`](./mlos_bench/config/cli/azure-redis-opt.jsonc) example. ```sh -mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_iterations 10 +mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_suggestions 10 ``` -Note that again we use the command line option `--max_iterations` to override the default value from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). +Note that again we use the command line option `--max_suggestions` to override the default value from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). We don't have to specify the `"tunable_values"` for the optimization: the optimizer will suggest new values on each iteration and the framework will feed this data into the benchmarking environment. diff --git a/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc b/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc index 71245054ad1..7d2c5c6cbe6 100644 --- a/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc +++ b/mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc @@ -3,7 +3,7 @@ // Licensed under the MIT License. 
// // Run: -// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_iterations 10 +// mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_RedisBench.jsonc --max_suggestions 10 { "config_path": [ "mlos_bench/mlos_bench/config", diff --git a/mlos_bench/mlos_bench/config/experiments/README.md b/mlos_bench/mlos_bench/config/experiments/README.md index f34634ff132..ae1094739c0 100644 --- a/mlos_bench/mlos_bench/config/experiments/README.md +++ b/mlos_bench/mlos_bench/config/experiments/README.md @@ -92,7 +92,7 @@ will be pushed down to the `Optimizer` configuration, e.g., [`mlos_core_flaml.js > NOTE: it is perfectly ok to have several files with the experiment-specific parameters (say, one for Azure, another one for Storage, and so on) and either include them in the `"globals"` section of the CLI config, and/or specify them in the command line when running the experiment, e.g. > > ```bash -> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_iterations 10 +> mlos_bench --config mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc --globals experiment_Redis_Azure.jsonc experiment_Redis_Tunables.jsonc --max_suggestions 10 > ``` > > (Note several files after the `--globals` option). 
diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc index 7c16c8200a4..f48d2f2e149 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_default_opt.jsonc @@ -7,6 +7,6 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100 + "max_suggestions": 100 } } diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc index 4b9db3be772..47d92aff6ee 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_flaml.jsonc @@ -7,7 +7,7 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100, + "max_suggestions": 100, "optimizer_type": "FLAML" } } diff --git a/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc b/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc index 324e694324b..3eb40c11468 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mlos_core_smac.jsonc @@ -7,7 +7,7 @@ "config": { "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 100, + "max_suggestions": 100, "optimizer_type": "SMAC", "output_directory": null // Override to have a permanent output with SMAC history etc. 
} diff --git a/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc b/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc index 7d8b321b111..1d3dec27cb2 100644 --- a/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc +++ b/mlos_bench/mlos_bench/config/optimizers/mock_opt.jsonc @@ -6,7 +6,7 @@ "config": { "optimization_target": "score", - "max_iterations": 5, + "max_suggestions": 5, "seed": 42 } } diff --git a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json index 0fa39a6d2f2..67e94bb4a3e 100644 --- a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json @@ -22,8 +22,8 @@ "enum": ["min", "max"], "example": "min" }, - "max_iterations": { - "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) iterations to run when we launch the app.", + "max_suggestions": { + "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) suggestions/trials to run when we launch the app.", "type": "integer", "minimum": 0, "example": 100 diff --git a/mlos_bench/mlos_bench/optimizers/base_optimizer.py b/mlos_bench/mlos_bench/optimizers/base_optimizer.py index ea5e904f132..21818695190 100644 --- a/mlos_bench/mlos_bench/optimizers/base_optimizer.py +++ b/mlos_bench/mlos_bench/optimizers/base_optimizer.py @@ -35,7 +35,7 @@ class Optimizer(metaclass=ABCMeta): # pylint: disable=too-many-instance-attr BASE_SUPPORTED_CONFIG_PROPS = { "optimization_target", "optimization_direction", - "max_iterations", + "max_suggestions", "seed", "start_with_defaults", } @@ -76,7 +76,7 @@ def __init__(self, # if True (default), use the already initialized values for the first iteration. 
self._start_with_defaults: bool = bool( strtobool(str(self._config.pop('start_with_defaults', True)))) - self._max_iter = int(self._config.pop('max_iterations', 100)) + self._max_iter = int(self._config.pop('max_suggestions', 100)) self._opt_target = str(self._config.pop('optimization_target', 'score')) self._opt_sign = {"min": 1, "max": -1}[self._config.pop('optimization_direction', 'min')] diff --git a/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc b/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc index 51bc38979ca..e010298ffff 100644 --- a/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc +++ b/mlos_bench/mlos_bench/tests/config/globals/global_test_config.jsonc @@ -22,7 +22,7 @@ "pathVarWithEnvVarRef": "$CUSTOM_PATH_FROM_ENV/foo", "varWithEnvVarRef": "user:$USER", - // Override the default value of the "max_iterations" parameter + // Override the default value of the "max_suggestions" parameter // of the optimizer when running local tests: - "max_iterations": 5 + "max_suggestions": 5 } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc index 7d72c13a400..934fe9966b5 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/grid_search_opt_invalid_max_iterations.jsonc @@ -1,6 +1,6 @@ { "class": "mlos_bench.optimizers.grid_search_optimizer.GridSearchOptimizer", "config": { - "max_iterations": null, + "max_suggestions": null, } } \ No newline at end of file diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc 
b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc index 7ee27e4562b..231f50034cb 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/missing_opt_class_config.jsonc @@ -2,7 +2,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc index 3b27ac1b4a5..9498c5ae205 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/invalid/mlos_opt_smac_bad_probability.jsonc @@ -3,7 +3,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc index c0a81e3a5ad..6891319e947 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mlos_opt_smac_llamatune_extra.jsonc @@ -3,7 +3,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc index 1c07aee853a..5b1259232f2 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/bad/unhandled/mock_opt_extra_outer.jsonc @@ -6,7 +6,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc index faac27e03b7..89a782a9735 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/grid_search_opt_full.jsonc @@ -5,7 +5,7 @@ 
"some/path/to/tunables.jsonc" ], "config": { - "max_iterations": 100, + "max_suggestions": 100, "optimization_direction": "max", "optimization_target": "score", "seed": 12345, diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc index cd493c0c795..0be5f47165a 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_flaml_null_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 10, + "max_suggestions": 10, "seed": 12345, "start_with_defaults": false, "optimizer_type": "FLAML", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc index 06d4570bee1..01f13fc8a77 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_random_identity_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "RANDOM", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc index 96c1e4afa11..24fdbda7aaa 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_llamatune_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc index 1518004aa57..dd4a93c2cbf 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mlos_opt_smac_null_full.jsonc @@ -3,7 +3,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false, "optimizer_type": "SMAC", diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc index 49546527a04..3dfd052fc10 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/mock_opt_full.jsonc @@ -4,7 +4,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc index 3e6df46ecb2..3b5d2f34a4e 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/full/one_shot_opt_full.jsonc @@ -11,7 +11,7 @@ "config": { // Here we do our best to list the exhaustive set of full configs available for the base optimizer config. 
"optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "start_with_defaults": false } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc index 1a2cdc48116..1159d982adb 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/optimizers/test-cases/good/partial/mlos_opt_smac_llamatune_partial.jsonc @@ -4,7 +4,7 @@ "config": { "optimization_target": "score", - "max_iterations": 20, + "max_suggestions": 20, "seed": 12345, "optimizer_type": "SMAC", "space_adapter_type": "LLAMATUNE" diff --git a/mlos_bench/mlos_bench/tests/launcher_in_process_test.py b/mlos_bench/mlos_bench/tests/launcher_in_process_test.py index 5f7ac7f4379..5954aed802d 100644 --- a/mlos_bench/mlos_bench/tests/launcher_in_process_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_in_process_test.py @@ -21,7 +21,7 @@ ([ "--config", "mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc", "--trial_config_repeat_count", "3", - "--max_iterations", "3", + "--max_suggestions", "3", ], 64.53897), ] ) diff --git a/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py b/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py index 8db8dab4787..bddee5729e3 100644 --- a/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_parse_args_test.py @@ -171,10 +171,10 @@ def test_launcher_args_parse_2(config_paths: List[str]) -> None: globals_file_config = launcher.config_loader.load_config(globals_file, ConfigSchema.GLOBALS) # The actual global_config gets overwritten as a part of processing, so to test # this we read the original value out of the source files. 
- orig_max_iters = globals_file_config.get('max_iterations', opt_config.get('config', {}).get('max_iterations', 100)) + orig_max_iters = globals_file_config.get('max_suggestions', opt_config.get('config', {}).get('max_suggestions', 100)) assert launcher.optimizer.max_iterations \ == orig_max_iters \ - == launcher.global_config['max_iterations'] + == launcher.global_config['max_suggestions'] # Check that the optimizer got initialized with random values instead of the defaults. # Note: the environment doesn't get updated until suggest() is called to diff --git a/mlos_bench/mlos_bench/tests/launcher_run_test.py b/mlos_bench/mlos_bench/tests/launcher_run_test.py index 965be6ce5ae..85f2235da49 100644 --- a/mlos_bench/mlos_bench/tests/launcher_run_test.py +++ b/mlos_bench/mlos_bench/tests/launcher_run_test.py @@ -93,7 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic """ _launch_main_app( root_path, local_exec_service, - "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 3", + "--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_suggestions 3", [ # Iteration 1: Expect first value to be the baseline f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " + diff --git a/mlos_bench/mlos_bench/tests/optimizers/conftest.py b/mlos_bench/mlos_bench/tests/optimizers/conftest.py index 468edc42a3f..907a2d98a57 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/conftest.py +++ b/mlos_bench/mlos_bench/tests/optimizers/conftest.py @@ -61,7 +61,7 @@ def mock_opt_no_defaults(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 5, + "max_suggestions": 5, "start_with_defaults": False, "seed": SEED }, @@ -79,7 +79,7 @@ def mock_opt(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - 
"max_iterations": 5, + "max_suggestions": 5, "seed": SEED }, ) @@ -96,7 +96,7 @@ def mock_opt_max(tunable_groups: TunableGroups) -> MockOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": 10, + "max_suggestions": 10, "seed": SEED }, ) @@ -113,7 +113,7 @@ def flaml_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": 15, + "max_suggestions": 15, "optimizer_type": "FLAML", "seed": SEED, }, @@ -131,7 +131,7 @@ def flaml_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": 15, + "max_suggestions": 15, "optimizer_type": "FLAML", "seed": SEED, }, @@ -157,7 +157,7 @@ def smac_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "min", - "max_iterations": SMAC_ITERATIONS, + "max_suggestions": SMAC_ITERATIONS, "optimizer_type": "SMAC", "seed": SEED, "output_directory": None, @@ -179,7 +179,7 @@ def smac_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer: config={ "optimization_target": "score", "optimization_direction": "max", - "max_iterations": SMAC_ITERATIONS, + "max_suggestions": SMAC_ITERATIONS, "optimizer_type": "SMAC", "seed": SEED, "output_directory": None, diff --git a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py index c1d714ba401..3eac2b40abf 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py @@ -82,7 +82,7 @@ def grid_search_opt(grid_search_tunables: TunableGroups, # multiple of the number of elements in the grid. 
max_iterations = len(grid_search_tunables_grid) * 2 - 3 return GridSearchOptimizer(tunables=grid_search_tunables, config={ - "max_iterations": max_iterations, + "max_suggestions": max_iterations, "optimization_direction": "max", }) @@ -161,7 +161,7 @@ def test_grid_search(grid_search_opt: GridSearchOptimizer, assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs) assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid) - # FIXME: Should we consider not_converged as the "max_iterations", an empty grid, or both? + # FIXME: Should we consider not_converged as the "max_suggestions", an empty grid, or both? # Try to empty the rest of the grid. while grid_search_opt.not_converged(): diff --git a/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py b/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py index 39735ae9d19..da2acfb6363 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/llamatune_opt_test.py @@ -31,7 +31,7 @@ def llamatune_opt(tunable_groups: TunableGroups) -> MlosCoreOptimizer: "num_low_dims": 2, }, "optimization_target": "score", - "max_iterations": 10, + "max_suggestions": 10, "optimizer_type": "SMAC", "seed": SEED, # "start_with_defaults": False, diff --git a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py index 4f716083cac..f36e3c149cc 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_df_test.py @@ -26,7 +26,7 @@ def mlos_core_optimizer(tunable_groups: TunableGroups) -> MlosCoreOptimizer: """ test_opt_config = { 'optimizer_type': 'FLAML', - 'max_iterations': 10, + 'max_suggestions': 10, 'seed': SEED, } return MlosCoreOptimizer(tunable_groups, test_opt_config) diff --git 
a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py index 779c9466250..b10571095b5 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/mlos_core_opt_smac_test.py @@ -30,7 +30,7 @@ def test_init_mlos_core_smac_opt_bad_trial_count(tunable_groups: TunableGroups) test_opt_config = { 'optimizer_type': 'SMAC', 'max_trials': 10, - 'max_iterations': 11, + 'max_suggestions': 11, 'seed': SEED, } with pytest.raises(AssertionError): @@ -44,13 +44,13 @@ def test_init_mlos_core_smac_opt_max_trials(tunable_groups: TunableGroups) -> No """ test_opt_config = { 'optimizer_type': 'SMAC', - 'max_iterations': 123, + 'max_suggestions': 123, 'seed': SEED, } opt = MlosCoreOptimizer(tunable_groups, test_opt_config) # pylint: disable=protected-access assert isinstance(opt._opt, SmacOptimizer) - assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_iterations'] + assert opt._opt.base_optimizer.scenario.n_trials == test_opt_config['max_suggestions'] def test_init_mlos_core_smac_absolute_output_directory(tunable_groups: TunableGroups) -> None: From a79e1192461cde118804f04dfe40bf33c7ca4144 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 13:03:00 -0700 Subject: [PATCH 07/11] Update mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json Co-authored-by: Brian Kroth --- .../mlos_bench/config/schemas/optimizers/optimizer-schema.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json index 67e94bb4a3e..7e190d48304 100644 --- a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json @@ -23,7 +23,7 @@ "example": "min" }, "max_suggestions": { - 
"description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) suggestions/trials to run when we launch the app.", + "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) config suggestions to run when we launch the app. Note: configs may be repeated in more than one trial.", "type": "integer", "minimum": 0, "example": 100 From b4cbd1755851677194b35d942c38d7e64bee226e Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 13:03:31 -0700 Subject: [PATCH 08/11] Update mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py Co-authored-by: Brian Kroth --- .../mlos_bench/tests/optimizers/grid_search_optimizer_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py index 3eac2b40abf..878e6f6c0d8 100644 --- a/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py +++ b/mlos_bench/mlos_bench/tests/optimizers/grid_search_optimizer_test.py @@ -161,7 +161,7 @@ def test_grid_search(grid_search_opt: GridSearchOptimizer, assert all(config in grid_search_tunables_grid for config in grid_search_opt.pending_configs) assert all(config in list(grid_search_opt.pending_configs) for config in grid_search_tunables_grid) - # FIXME: Should we consider not_converged as the "max_suggestions", an empty grid, or both? + # We consider not_converged as either having reached "max_suggestions" or an empty grid? # Try to empty the rest of the grid. 
while grid_search_opt.not_converged(): From 8f82beaacae5bd8e720c0ff2fddb0aa1f466e3d7 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 13:07:20 -0700 Subject: [PATCH 09/11] rename _get_optimizer_suggestions to _schedule_new_optimizer_suggestions --- mlos_bench/mlos_bench/schedulers/base_scheduler.py | 2 +- mlos_bench/mlos_bench/schedulers/sync_scheduler.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/schedulers/base_scheduler.py b/mlos_bench/mlos_bench/schedulers/base_scheduler.py index 5d0dba731d4..f61f3bcee6f 100644 --- a/mlos_bench/mlos_bench/schedulers/base_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/base_scheduler.py @@ -178,7 +178,7 @@ def load_config(self, config_id: int) -> TunableGroups: _LOG.debug("Config %d ::\n%s", config_id, json.dumps(tunable_values, indent=2)) return tunables - def _get_optimizer_suggestions(self) -> bool: + def _schedule_new_optimizer_suggestions(self) -> bool: """ Optimizer part of the loop. Load the results of the executed trials into the optimizer, suggest new configurations, and add them to the queue. 
diff --git a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py index 91d21e996e2..a8e08b7d1db 100644 --- a/mlos_bench/mlos_bench/schedulers/sync_scheduler.py +++ b/mlos_bench/mlos_bench/schedulers/sync_scheduler.py @@ -35,7 +35,7 @@ def start(self) -> None: while not_converged: _LOG.info("Optimization loop: Last trial ID: %d", self._last_trial_id) self._run_schedule(is_warm_up) - not_converged = self._get_optimizer_suggestions() + not_converged = self._schedule_new_optimizer_suggestions() is_warm_up = False def run_trial(self, trial: Storage.Trial) -> None: From 13686590bccdf1d175426babe6454aae8dd83c66 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 13:21:14 -0700 Subject: [PATCH 10/11] add --trial-config-repeat-count 3 to mlos_bench example in README --- mlos_bench/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mlos_bench/README.md b/mlos_bench/README.md index 16b9388b708..2a18d6bbad7 100644 --- a/mlos_bench/README.md +++ b/mlos_bench/README.md @@ -195,10 +195,12 @@ Searching for an optimal set of tunable parameters is very similar to running a All we have to do is specifying the [`Optimizer`](./mlos_bench/optimizers/) in the top-level configuration, like in our [`azure-redis-opt.jsonc`](./mlos_bench/config/cli/azure-redis-opt.jsonc) example. ```sh -mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_suggestions 10 +mlos_bench --config "./mlos_bench/mlos_bench/config/cli/azure-redis-opt.jsonc" --globals "experiment_MyBenchmark.jsonc" --max_suggestions 10 --trial-config-repeat-count 3 ``` -Note that again we use the command line option `--max_suggestions` to override the default value from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). +Note that again we use the command line option `--max_suggestions` to override the max. 
number of suggested configurations to trial from [`mlos_core_flaml.jsonc`](./mlos_bench/config/optimizers/mlos_core_flaml.jsonc). +We also use `--trial-config-repeat-count` to benchmark each suggested configuration 3 times. +That means we will run 30 trials in total, 3 for each of the 10 suggested configurations. We don't have to specify the `"tunable_values"` for the optimization: the optimizer will suggest new values on each iteration and the framework will feed this data into the benchmarking environment. From 5575718350bee070411fe0b4838b9a0df7792fb9 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 19 Mar 2024 14:12:21 -0700 Subject: [PATCH 11/11] Update mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json Co-authored-by: Brian Kroth --- .../mlos_bench/config/schemas/optimizers/optimizer-schema.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json index 7e190d48304..32db42f9e66 100644 --- a/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/optimizers/optimizer-schema.json @@ -23,7 +23,7 @@ "example": "min" }, "max_suggestions": { - "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) config suggestions to run when we launch the app. Note: configs may be repeated in more than one trial.", + "description": "The maximum number of additional (in the case of merging experiment data or resuming experiments) config suggestions to run when we launch the app, or no limit if 0 is provided. Note: configs may be repeated in more than one trial.", "type": "integer", "minimum": 0, "example": 100