Merge branch 'main' into globals-cli-option-compat-fixup
motus authored Aug 19, 2024
2 parents 2493fea + 439df99 commit 9d43651
Showing 13 changed files with 103 additions and 69 deletions.
@@ -54,7 +54,7 @@
"example": 10
},
"max_trials": {
"description": "Influence the budget of max number of trials for SMAC. If omitted, will default to max_iterations.",
"description": "Influence the budget of max number of trials for SMAC. If omitted, will default to max_suggestions.",
"type": "integer",
"minimum": 10,
"example": 100
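The schema change above ties the max_trials default to max_suggestions. A minimal sketch of the relevant optimizer config fields (values are illustrative; only the keys discussed in this diff are shown):

optimizer_config = {
    "optimization_targets": {"score": "min"},  # base optimizer default target
    "max_suggestions": 100,  # budget of suggestions the optimizer may produce
    # "max_trials" may be omitted; it then defaults to max_suggestions, and
    # MlosCoreOptimizer asserts max_trials >= max_suggestions (see below).
    "max_trials": 100,
}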
11 changes: 4 additions & 7 deletions mlos_bench/mlos_bench/optimizers/base_optimizer.py
@@ -78,7 +78,7 @@ def __init__(
self._start_with_defaults: bool = bool(
strtobool(str(self._config.pop("start_with_defaults", True)))
)
self._max_iter = int(self._config.pop("max_suggestions", 100))
self._max_suggestions = int(self._config.pop("max_suggestions", 100))

opt_targets: Dict[str, str] = self._config.pop("optimization_targets", {"score": "min"})
self._opt_targets: Dict[str, Literal[1, -1]] = {}
@@ -142,18 +142,15 @@ def current_iteration(self) -> int:
"""
return self._iter

# TODO: finish renaming iterations to suggestions.
# See Also: https://github.com/microsoft/MLOS/pull/713

@property
def max_iterations(self) -> int:
def max_suggestions(self) -> int:
"""
The maximum number of iterations (suggestions) to run.
Note: this may or may not be the same as the number of configurations.
See Also: Scheduler.trial_config_repeat_count and Scheduler.max_trials.
"""
return self._max_iter
return self._max_suggestions

@property
def seed(self) -> int:
@@ -362,7 +359,7 @@ def not_converged(self) -> bool:
Base implementation just checks the iteration count.
"""
return self._iter < self._max_iter
return self._iter < self._max_suggestions

@abstractmethod
def get_best_observation(
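For orientation, a rough sketch of how these hooks are driven in a suggest/register loop (run_trial is a hypothetical stand-in for whatever executes a benchmark trial; the real scheduling logic lives in the Scheduler classes):

while optimizer.not_converged():           # base implementation: _iter < _max_suggestions
    suggestion = optimizer.suggest()       # next TunableGroups to evaluate
    status, score = run_trial(suggestion)  # hypothetical helper that runs one trial
    optimizer.register(suggestion, status, score)
best = optimizer.get_best_observation()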
35 changes: 2 additions & 33 deletions mlos_bench/mlos_bench/optimizers/convert_configspace.py
@@ -20,13 +20,13 @@
Normal,
Uniform,
)
from ConfigSpace.functional import quantize
from ConfigSpace.hyperparameters import NumericalHyperparameter
from ConfigSpace.types import NotSet

from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import try_parse_val
from mlos_core.spaces.converters.util import monkey_patch_quantization

_LOG = logging.getLogger(__name__)

@@ -49,37 +49,6 @@ def _normalize_weights(weights: List[float]) -> List[float]:
return [w / total for w in weights]


def _monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
"""
Monkey-patch quantization into the Hyperparameter.
Parameters
----------
hp : NumericalHyperparameter
ConfigSpace hyperparameter to patch.
quantization_bins : int
Number of bins to quantize the hyperparameter into.
"""
if quantization_bins <= 1:
raise ValueError(f"{quantization_bins=} :: must be greater than 1.")

# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
if not hasattr(hp, "sample_value_mlos_orig"):
setattr(hp, "sample_value_mlos_orig", hp.sample_value)

assert hasattr(hp, "sample_value_mlos_orig")
setattr(
hp,
"sample_value",
lambda size=None, **kwargs: quantize(
hp.sample_value_mlos_orig(size, **kwargs),
bounds=(hp.lower, hp.upper),
bins=quantization_bins,
).astype(type(hp.default_value)),
)


def _tunable_to_configspace(
tunable: Tunable,
group_name: Optional[str] = None,
@@ -171,7 +140,7 @@ def _tunable_to_configspace(
if tunable.quantization_bins:
# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
_monkey_patch_quantization(range_hp, tunable.quantization_bins)
monkey_patch_quantization(range_hp, tunable.quantization_bins)

if not tunable.special:
return ConfigurationSpace({tunable.name: range_hp})
8 changes: 4 additions & 4 deletions mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py
@@ -58,11 +58,11 @@ def _sanity_check(self) -> None:
size,
self._tunables,
)
if size > self._max_iter:
if size > self._max_suggestions:
_LOG.warning(
"Grid search size %d, is greater than max iterations %d",
size,
self._max_iter,
self._max_suggestions,
)

def _get_grid(self) -> Tuple[Tuple[str, ...], Dict[Tuple[TunableValue, ...], None]]:
@@ -147,7 +147,7 @@ def suggest(self) -> TunableGroups:
self._suggested_configs.add(default_config_values)
else:
# Select the first item from the pending configs.
if not self._pending_configs and self._iter <= self._max_iter:
if not self._pending_configs and self._iter <= self._max_suggestions:
_LOG.info("No more pending configs to suggest. Restarting grid.")
self._config_keys, self._pending_configs = self._get_grid()
try:
@@ -185,7 +185,7 @@ def register(
return registered_score

def not_converged(self) -> bool:
if self._iter > self._max_iter:
if self._iter > self._max_suggestions:
if bool(self._pending_configs):
_LOG.warning(
"Exceeded max iterations, but still have %d pending configs: %s",
11 changes: 6 additions & 5 deletions mlos_bench/mlos_bench/optimizers/mlos_core_optimizer.py
@@ -62,12 +62,13 @@ def __init__(
)
)

# Make sure max_trials >= max_iterations.
# Make sure max_trials >= max_suggestions.
if "max_trials" not in self._config:
self._config["max_trials"] = self._max_iter
assert (
int(self._config["max_trials"]) >= self._max_iter
), f"max_trials {self._config.get('max_trials')} <= max_iterations {self._max_iter}"
self._config["max_trials"] = self._max_suggestions
assert int(self._config["max_trials"]) >= self._max_suggestions, (
f"max_trials {self._config.get('max_trials')} "
f"<= max_suggestions{self._max_suggestions}"
)

if "run_name" not in self._config and self.experiment_id:
self._config["run_name"] = self.experiment_id
2 changes: 1 addition & 1 deletion mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py
@@ -32,7 +32,7 @@ def __init__(
):
super().__init__(tunables, config, global_config, service)
_LOG.info("Run a single iteration for: %s", self._tunables)
self._max_iter = 1 # Always run for just one iteration.
self._max_suggestions = 1 # Always run for just one iteration.

def suggest(self) -> TunableGroups:
"""Always produce the same (initial) suggestion."""
6 changes: 3 additions & 3 deletions mlos_bench/mlos_bench/tests/launcher_parse_args_test.py
@@ -103,7 +103,7 @@ def test_launcher_args_parse_defaults(config_paths: List[str]) -> None:
assert isinstance(launcher.optimizer, OneShotOptimizer)
# Check that the optimizer got initialized with defaults.
assert launcher.optimizer.tunable_params.is_defaults()
assert launcher.optimizer.max_iterations == 1 # value for OneShotOptimizer
assert launcher.optimizer.max_suggestions == 1 # value for OneShotOptimizer
# Check that we pick up the right scheduler config:
assert isinstance(launcher.scheduler, SyncScheduler)
assert launcher.scheduler.trial_config_repeat_count == 1 # default
@@ -155,7 +155,7 @@ def test_launcher_args_parse_1(config_paths: List[str]) -> None:
assert isinstance(launcher.optimizer, OneShotOptimizer)
# Check that the optimizer got initialized with defaults.
assert launcher.optimizer.tunable_params.is_defaults()
assert launcher.optimizer.max_iterations == 1 # value for OneShotOptimizer
assert launcher.optimizer.max_suggestions == 1 # value for OneShotOptimizer
# Check that we pick up the right scheduler config:
assert isinstance(launcher.scheduler, SyncScheduler)
assert (
@@ -223,7 +223,7 @@ def test_launcher_args_parse_2(config_paths: List[str]) -> None:
"max_suggestions", opt_config.get("config", {}).get("max_suggestions", 100)
)
assert (
launcher.optimizer.max_iterations
launcher.optimizer.max_suggestions
== orig_max_iters
== launcher.global_config["max_suggestions"]
)
2 changes: 1 addition & 1 deletion mlos_bench/mlos_bench/tests/optimizers/conftest.py
@@ -111,7 +111,7 @@ def flaml_opt_max(tunable_groups: TunableGroups) -> MlosCoreOptimizer:


# FIXME: SMAC's RF model can be non-deterministic at low iterations, which are
# normally calculated as a percentage of the max_iterations and number of
# normally calculated as a percentage of the max_suggestions and number of
# tunable dimensions, so for now we set the initial random samples equal to the
# number of iterations and control them with a seed.

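The comment above pins SMAC's initial random samples to the suggestion budget; a hedged sketch of what such a fixture config might look like (the optimizer_type, seed, and n_random_init key names are assumptions about the pass-through config, not taken from this diff):

smac_opt_config = {
    "optimizer_type": "SMAC",  # assumed mlos_core optimizer selector key
    "max_suggestions": 20,
    "seed": 42,                # fixed seed for deterministic RF behavior
    "n_random_init": 20,       # assumed: initial random samples == max_suggestions
}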
@@ -83,11 +83,11 @@ def grid_search_opt(
assert len(grid_search_tunables) == 3
# Test the convergence logic by controlling the number of iterations to be not a
# multiple of the number of elements in the grid.
max_iterations = len(grid_search_tunables_grid) * 2 - 3
max_suggestions = len(grid_search_tunables_grid) * 2 - 3
return GridSearchOptimizer(
tunables=grid_search_tunables,
config={
"max_suggestions": max_iterations,
"max_suggestions": max_suggestions,
"optimization_targets": {"score": "max", "other_score": "min"},
},
)
@@ -187,7 +187,7 @@ def test_grid_search(

# But if we still have iterations left, we should be able to suggest again by
# refilling the grid.
assert grid_search_opt.current_iteration < grid_search_opt.max_iterations
assert grid_search_opt.current_iteration < grid_search_opt.max_suggestions
assert grid_search_opt.suggest()
assert list(grid_search_opt.pending_configs)
assert list(grid_search_opt.suggested_configs)
@@ -198,7 +198,7 @@
suggestion = grid_search_opt.suggest()
grid_search_opt.register(suggestion, status, score)
assert not grid_search_opt.not_converged()
assert grid_search_opt.current_iteration >= grid_search_opt.max_iterations
assert grid_search_opt.current_iteration >= grid_search_opt.max_suggestions
assert list(grid_search_opt.pending_configs)
assert list(grid_search_opt.suggested_configs)

@@ -17,13 +17,13 @@

from mlos_bench.optimizers.convert_configspace import (
TunableValueKind,
_monkey_patch_quantization,
_tunable_to_configspace,
special_param_names,
tunable_groups_to_configspace,
)
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_core.spaces.converters.util import monkey_patch_quantization

# pylint: disable=redefined-outer-name

@@ -103,7 +103,7 @@ def configuration_space() -> ConfigurationSpace:
)
hp = spaces["kernel_sched_latency_ns"]
assert isinstance(hp, NumericalHyperparameter)
_monkey_patch_quantization(hp, quantization_bins=10)
monkey_patch_quantization(hp, quantization_bins=11)
return spaces


39 changes: 39 additions & 0 deletions mlos_core/mlos_core/spaces/converters/util.py
@@ -0,0 +1,39 @@
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""Helper functions for config space converters."""

from ConfigSpace.functional import quantize
from ConfigSpace.hyperparameters import NumericalHyperparameter


def monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
"""
Monkey-patch quantization into the Hyperparameter.
Parameters
----------
hp : NumericalHyperparameter
ConfigSpace hyperparameter to patch.
quantization_bins : int
Number of bins to quantize the hyperparameter into.
"""
if quantization_bins <= 1:
raise ValueError(f"{quantization_bins=} :: must be greater than 1.")

# Temporary workaround to dropped quantization support in ConfigSpace 1.0
# See Also: https://github.com/automl/ConfigSpace/issues/390
if not hasattr(hp, "sample_value_mlos_orig"):
setattr(hp, "sample_value_mlos_orig", hp.sample_value)

assert hasattr(hp, "sample_value_mlos_orig")
setattr(
hp,
"sample_value",
lambda size=None, **kwargs: quantize(
hp.sample_value_mlos_orig(size, **kwargs),
bounds=(hp.lower, hp.upper),
bins=quantization_bins,
).astype(type(hp.default_value)),
)
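A small usage sketch of the relocated helper (bounds and bin count are illustrative; this mirrors how the tests below patch a hyperparameter):

import ConfigSpace as CS

from mlos_core.spaces.converters.util import monkey_patch_quantization

space = CS.ConfigurationSpace(seed=1234)
hp = CS.UniformIntegerHyperparameter(name="kernel_sched_latency_ns", lower=0, upper=1000000000)
space.add(hp)
monkey_patch_quantization(hp, quantization_bins=11)
# hp.sample_value() now snaps samples to 11 evenly spaced bins within [lower, upper].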
30 changes: 29 additions & 1 deletion mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
@@ -13,11 +13,18 @@
import pytest

from mlos_core.spaces.adapters import LlamaTuneAdapter
from mlos_core.spaces.converters.util import monkey_patch_quantization

# Explicitly test quantized values with llamatune space adapter.
# TODO: Add log scale sampling tests as well.

def construct_parameter_space(

def construct_parameter_space( # pylint: disable=too-many-arguments
*,
n_continuous_params: int = 0,
n_quantized_continuous_params: int = 0,
n_integer_params: int = 0,
n_quantized_integer_params: int = 0,
n_categorical_params: int = 0,
seed: int = 1234,
) -> CS.ConfigurationSpace:
@@ -26,8 +33,16 @@ def construct_parameter_space(

for idx in range(n_continuous_params):
input_space.add(CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64))
for idx in range(n_quantized_continuous_params):
param_float = CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64)
monkey_patch_quantization(param_float, 6)
input_space.add(param_float)
for idx in range(n_integer_params):
input_space.add(CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=-1, upper=256))
for idx in range(n_quantized_integer_params):
param_int = CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=0, upper=256)
monkey_patch_quantization(param_int, 17)
input_space.add(param_int)
for idx in range(n_categorical_params):
input_space.add(
CS.CategoricalHyperparameter(
@@ -49,6 +64,13 @@ def construct_parameter_space(
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
@@ -358,6 +380,12 @@ def test_max_unique_values_per_param() -> None:
{"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
{"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
{
"n_quantized_continuous_params": int(
num_target_space_dims * num_orig_space_factor
)
},
# Mix of all three types
{
"n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
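Taken together, the new fixture parameters let the LlamaTune tests cover quantized dimensions as well; an illustrative call (not test code from this diff):

space = construct_parameter_space(n_quantized_integer_params=2, n_quantized_continuous_params=1)
# The two "int_*" parameters sample from 17 bins and the "cont_*" parameter from 6 bins,
# matching the monkey_patch_quantization calls added above.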