Commit f21e7be: Merge branch 'main' into finish-renaming-iterations-to-suggestions

Authored by motus on Aug 19, 2024 · 2 parents: c8c813d + 9183ae6

Showing 5 changed files with 79 additions and 43 deletions.
35 changes: 2 additions & 33 deletions mlos_bench/mlos_bench/optimizers/convert_configspace.py
@@ -20,13 +20,13 @@
    Normal,
    Uniform,
)
-from ConfigSpace.functional import quantize
-from ConfigSpace.hyperparameters import NumericalHyperparameter
+from ConfigSpace.types import NotSet

from mlos_bench.tunables.tunable import Tunable, TunableValue
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.util import try_parse_val
+from mlos_core.spaces.converters.util import monkey_patch_quantization

_LOG = logging.getLogger(__name__)
@@ -49,37 +49,6 @@ def _normalize_weights(weights: List[float]) -> List[float]:
    return [w / total for w in weights]


-def _monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
-    """
-    Monkey-patch quantization into the Hyperparameter.
-
-    Parameters
-    ----------
-    hp : NumericalHyperparameter
-        ConfigSpace hyperparameter to patch.
-    quantization_bins : int
-        Number of bins to quantize the hyperparameter into.
-    """
-    if quantization_bins <= 1:
-        raise ValueError(f"{quantization_bins=} :: must be greater than 1.")
-
-    # Temporary workaround to dropped quantization support in ConfigSpace 1.0
-    # See Also: https://github.com/automl/ConfigSpace/issues/390
-    if not hasattr(hp, "sample_value_mlos_orig"):
-        setattr(hp, "sample_value_mlos_orig", hp.sample_value)
-
-    assert hasattr(hp, "sample_value_mlos_orig")
-    setattr(
-        hp,
-        "sample_value",
-        lambda size=None, **kwargs: quantize(
-            hp.sample_value_mlos_orig(size, **kwargs),
-            bounds=(hp.lower, hp.upper),
-            bins=quantization_bins,
-        ).astype(type(hp.default_value)),
-    )

def _tunable_to_configspace(
tunable: Tunable,
group_name: Optional[str] = None,
@@ -171,7 +140,7 @@ def _tunable_to_configspace(
    if tunable.quantization_bins:
        # Temporary workaround to dropped quantization support in ConfigSpace 1.0
        # See Also: https://github.com/automl/ConfigSpace/issues/390
-        _monkey_patch_quantization(range_hp, tunable.quantization_bins)
+        monkey_patch_quantization(range_hp, tunable.quantization_bins)

    if not tunable.special:
        return ConfigurationSpace({tunable.name: range_hp})
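For context, a sketch (not part of the diff) of how a quantized tunable flows through this converter. The tunable name is borrowed from the test fixture below; the range, default, and exact config keys are assumptions for illustration:

from mlos_bench.tunables.tunable import Tunable

# Hypothetical integer tunable with 11 quantization bins.
tunable = Tunable(
    "kernel_sched_latency_ns",
    {"type": "int", "range": [0, 1000000000], "default": 2000000, "quantization_bins": 11},
)
space = _tunable_to_configspace(tunable)
hp = space["kernel_sched_latency_ns"]
samples = hp.sample_value(10)  # every sample now snaps to an 11-point grid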
@@ -17,13 +17,13 @@

from mlos_bench.optimizers.convert_configspace import (
TunableValueKind,
_monkey_patch_quantization,
_tunable_to_configspace,
special_param_names,
tunable_groups_to_configspace,
)
from mlos_bench.tunables.tunable import Tunable
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_core.spaces.converters.util import monkey_patch_quantization

# pylint: disable=redefined-outer-name

@@ -103,7 +103,7 @@ def configuration_space() -> ConfigurationSpace:
    )
    hp = spaces["kernel_sched_latency_ns"]
    assert isinstance(hp, NumericalHyperparameter)
-    _monkey_patch_quantization(hp, quantization_bins=10)
+    monkey_patch_quantization(hp, quantization_bins=11)
    return spaces
39 changes: 39 additions & 0 deletions mlos_core/mlos_core/spaces/converters/util.py
@@ -0,0 +1,39 @@
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""Helper functions for config space converters."""

from ConfigSpace.functional import quantize
from ConfigSpace.hyperparameters import NumericalHyperparameter


def monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
    """
    Monkey-patch quantization into the Hyperparameter.

    Parameters
    ----------
    hp : NumericalHyperparameter
        ConfigSpace hyperparameter to patch.
    quantization_bins : int
        Number of bins to quantize the hyperparameter into.
    """
    if quantization_bins <= 1:
        raise ValueError(f"{quantization_bins=} :: must be greater than 1.")

    # Temporary workaround to dropped quantization support in ConfigSpace 1.0
    # See Also: https://github.com/automl/ConfigSpace/issues/390
    if not hasattr(hp, "sample_value_mlos_orig"):
        setattr(hp, "sample_value_mlos_orig", hp.sample_value)

    assert hasattr(hp, "sample_value_mlos_orig")
    setattr(
        hp,
        "sample_value",
        lambda size=None, **kwargs: quantize(
            hp.sample_value_mlos_orig(size, **kwargs),
            bounds=(hp.lower, hp.upper),
            bins=quantization_bins,
        ).astype(type(hp.default_value)),
    )
30 changes: 29 additions & 1 deletion mlos_core/mlos_core/tests/spaces/adapters/llamatune_test.py
@@ -13,11 +13,18 @@
import pytest

from mlos_core.spaces.adapters import LlamaTuneAdapter
+from mlos_core.spaces.converters.util import monkey_patch_quantization

+# Explicitly test quantized values with llamatune space adapter.
+# TODO: Add log scale sampling tests as well.


-def construct_parameter_space(
+def construct_parameter_space(  # pylint: disable=too-many-arguments
+    *,
    n_continuous_params: int = 0,
+    n_quantized_continuous_params: int = 0,
    n_integer_params: int = 0,
+    n_quantized_integer_params: int = 0,
    n_categorical_params: int = 0,
    seed: int = 1234,
) -> CS.ConfigurationSpace:
@@ -26,8 +33,16 @@ def construct_parameter_space(

    for idx in range(n_continuous_params):
        input_space.add(CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64))
+    for idx in range(n_quantized_continuous_params):
+        param_int = CS.UniformFloatHyperparameter(name=f"cont_{idx}", lower=0, upper=64)
+        monkey_patch_quantization(param_int, 6)
+        input_space.add(param_int)
    for idx in range(n_integer_params):
        input_space.add(CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=-1, upper=256))
+    for idx in range(n_quantized_integer_params):
+        param_float = CS.UniformIntegerHyperparameter(name=f"int_{idx}", lower=0, upper=256)
+        monkey_patch_quantization(param_float, 17)
+        input_space.add(param_float)
    for idx in range(n_categorical_params):
        input_space.add(
            CS.CategoricalHyperparameter(
@@ -49,6 +64,13 @@ def construct_parameter_space(
        {"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
        {"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
        {"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
+        {"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
+        {"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
+        {
+            "n_quantized_continuous_params": int(
+                num_target_space_dims * num_orig_space_factor
+            )
+        },
        # Mix of all three types
        {
            "n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
@@ -358,6 +380,12 @@ def test_max_unique_values_per_param() -> None:
        {"n_continuous_params": int(num_target_space_dims * num_orig_space_factor)},
        {"n_integer_params": int(num_target_space_dims * num_orig_space_factor)},
        {"n_categorical_params": int(num_target_space_dims * num_orig_space_factor)},
+        {"n_quantized_integer_params": int(num_target_space_dims * num_orig_space_factor)},
+        {
+            "n_quantized_continuous_params": int(
+                num_target_space_dims * num_orig_space_factor
+            )
+        },
        # Mix of all three types
        {
            "n_continuous_params": int(num_target_space_dims * num_orig_space_factor / 3),
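For orientation, a hypothetical call to the updated helper (a sketch; the dimension counts are arbitrary):

# Build a 10-dimensional space mixing the two new quantized parameter kinds.
space = construct_parameter_space(
    n_quantized_continuous_params=5,  # floats snapped to 6 bins each
    n_quantized_integer_params=5,  # integers snapped to 17 bins each
    seed=1234,
)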
@@ -8,8 +8,8 @@
from ConfigSpace import UniformFloatHyperparameter, UniformIntegerHyperparameter
from numpy.random import RandomState

-from mlos_bench.optimizers.convert_configspace import _monkey_patch_quantization
-from mlos_bench.tests import SEED
+from mlos_core.spaces.converters.util import monkey_patch_quantization
+from mlos_core.tests import SEED

def test_configspace_quant_int() -> None:
@@ -20,7 +20,7 @@ def test_configspace_quant_int() -> None:
    # Before patching: expect that at least one value is not quantized.
    assert not set(hp.sample_value(100)).issubset(quantized_values)

-    _monkey_patch_quantization(hp, 11)
+    monkey_patch_quantization(hp, 11)
    # After patching: *all* values must belong to the set of quantized values.
    assert hp.sample_value() in quantized_values  # check scalar type
    assert set(hp.sample_value(100)).issubset(quantized_values)  # batch version
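The grid arithmetic behind these assertions, as a small sketch (assuming the hyperparameter spans [0, 100], which is what the quantized-values set suggests):

import numpy as np

from ConfigSpace.functional import quantize

# bins=11 over [0, 100] snaps values to np.linspace(0, 100, 11) = {0, 10, ..., 100}.
assert quantize(np.float64(42.0), bounds=(0, 100), bins=11) == 40.0
assert quantize(np.float64(97.0), bounds=(0, 100), bins=11) == 100.0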
@@ -35,7 +35,7 @@ def test_configspace_quant_float() -> None:
    assert not set(hp.sample_value(100)).issubset(quantized_values)

    # 5 is a nice number of bins to avoid floating point errors.
-    _monkey_patch_quantization(hp, 5)
+    monkey_patch_quantization(hp, 5)
    # After patching: *all* values must belong to the set of quantized values.
    assert hp.sample_value() in quantized_values  # check scalar type
    assert set(hp.sample_value(100)).issubset(quantized_values)  # batch version
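A note on that comment: with 5 bins over, say, [0, 1], the grid is {0, 0.25, 0.5, 0.75, 1.0}, every point of which is exactly representable in binary floating point, so set-membership checks on float samples stay exact. The actual bounds of this hyperparameter are not shown in this hunk, so [0, 1] is an assumption.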
@@ -49,18 +49,18 @@ def test_configspace_quant_repatch() -> None:
    # Before patching: expect that at least one value is not quantized.
    assert not set(hp.sample_value(100)).issubset(quantized_values)

-    _monkey_patch_quantization(hp, 11)
+    monkey_patch_quantization(hp, 11)
    # After patching: *all* values must belong to the set of quantized values.
    samples = hp.sample_value(100, seed=RandomState(SEED))
    assert set(samples).issubset(quantized_values)

    # Patch the same hyperparameter again and check that the results are the same.
-    _monkey_patch_quantization(hp, 11)
+    monkey_patch_quantization(hp, 11)
    # After patching: *all* values must belong to the set of quantized values.
    assert all(samples == hp.sample_value(100, seed=RandomState(SEED)))

    # Repatch with the higher number of bins and make sure we get new values.
-    _monkey_patch_quantization(hp, 21)
+    monkey_patch_quantization(hp, 21)
    samples_set = set(hp.sample_value(100, seed=RandomState(SEED)))
    quantized_values_new = set(range(5, 96, 10))
    assert samples_set.issubset(set(range(0, 101, 5)))
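To unpack the final assertions: bins=21 over [0, 100] yields the grid {0, 5, ..., 100}, i.e. the old bins=11 grid {0, 10, ..., 100} plus the new midpoints {5, 15, ..., 95} collected in quantized_values_new, so re-patched samples must land on the finer grid and can now hit values the bins=11 patch never produced.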