
Merge branch 'main' into backporting-tweaks-from-demo
bpkroth committed Aug 16, 2024
2 parents cb9fdce + 8afe9c3 commit 31d99f7
Showing 22 changed files with 207 additions and 96 deletions.
2 changes: 1 addition & 1 deletion .bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.6.0
+current_version = 0.6.1
 commit = True
 tag = True

7 changes: 7 additions & 0 deletions .devcontainer/Dockerfile
@@ -92,3 +92,10 @@ RUN umask 0002 \
     && mkdir -p /opt/conda/pkgs/cache/ && chown -R vscode:conda /opt/conda/pkgs/cache/
 RUN mkdir -p /home/vscode/.conda/envs \
     && ln -s /opt/conda/envs/mlos /home/vscode/.conda/envs/mlos
+
+# Try and prime the devcontainer's ssh known_hosts keys with the github one for scripted calls.
+RUN mkdir -p /home/vscode/.ssh \
+    && ( \
+        grep -q ^github.com /home/vscode/.ssh/known_hosts \
+        || ssh-keyscan github.com | tee -a /home/vscode/.ssh/known_hosts \
+    )
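The guard above makes the key priming idempotent: `grep -q` checks for an existing `github.com` entry before `ssh-keyscan` appends one, so image rebuilds don't duplicate lines. For comparison, a rough Python sketch of the same idempotent logic (a hypothetical helper for illustration, not part of this PR):

```python
# Sketch: append github.com's SSH host keys to known_hosts only if absent.
# Hypothetical helper; the PR itself does this in the Dockerfile with shell.
import subprocess
from pathlib import Path

known_hosts = Path.home() / ".ssh" / "known_hosts"
known_hosts.parent.mkdir(parents=True, exist_ok=True)
existing = known_hosts.read_text() if known_hosts.exists() else ""
if not any(line.startswith("github.com") for line in existing.splitlines()):
    # ssh-keyscan prints the host's public keys to stdout.
    keys = subprocess.run(
        ["ssh-keyscan", "github.com"],
        capture_output=True, text=True, check=True,
    ).stdout
    with known_hosts.open("a") as fh:
        fh.write(keys)
```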
4 changes: 4 additions & 0 deletions .github/workflows/devcontainer.yml
@@ -128,6 +128,10 @@ jobs:
         run: |
           docker exec --user vscode --env USER=vscode mlos-devcontainer printenv
+      - name: Check that github.com is in the ssh known_hosts file
+        run: |
+          docker exec --user vscode --env USER=vscode mlos-devcontainer grep ^github.com /home/vscode/.ssh/known_hosts
       - name: Update the conda env in the devcontainer
         timeout-minutes: 10
         run: |
2 changes: 1 addition & 1 deletion doc/source/version.py
@@ -7,7 +7,7 @@
 """
 
 # NOTE: This should be managed by bumpversion.
-VERSION = '0.6.0'
+VERSION = '0.6.1'
 
 if __name__ == "__main__":
     print(VERSION)
@@ -123,9 +123,10 @@
             "maxItems": 2,
             "uniqueItems": true
         },
-        "quantization": {
+        "quantization_bins": {
             "description": "The number of buckets to quantize the range into.",
-            "$comment": "type left unspecified here"
+            "type": "integer",
+            "exclusiveMinimum": 1
         },
         "log_scale": {
             "description": "Whether to use log instead of linear scale for the range search.",
@@ -186,7 +187,7 @@
         },
         "required": ["type", "default", "values"],
         "not": {
-            "required": ["range", "special", "special_weights", "range_weight", "log", "quantization", "distribution"]
+            "required": ["range", "special", "special_weights", "range_weight", "log", "quantization_bins", "distribution"]
         },
         "$comment": "TODO: add check that default is in values",
         "unevaluatedProperties": false
@@ -216,10 +217,8 @@
"distribution": {
"$ref": "#/$defs/tunable_param_distribution"
},
"quantization": {
"$ref": "#/$defs/quantization",
"type": "integer",
"exclusiveMinimum": 1
"quantization_bins": {
"$ref": "#/$defs/quantization_bins"
},
"log": {
"$ref": "#/$defs/log_scale"
Expand Down Expand Up @@ -266,10 +265,8 @@
"distribution": {
"$ref": "#/$defs/tunable_param_distribution"
},
"quantization": {
"$ref": "#/$defs/quantization",
"type": "number",
"exclusiveMinimum": 0
"quantization_bins": {
"$ref": "#/$defs/quantization_bins"
},
"log": {
"$ref": "#/$defs/log_scale"
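The schema changes above replace the previously untyped `quantization` property with `quantization_bins`, a strictly-typed integer greater than 1 that the int and float parameter definitions now share through a single `$defs` entry. A minimal sketch of what the new rules accept and reject (abridged from the fragment above; assumes the `jsonschema` package):

```python
# Sketch: validate the new quantization_bins rules with jsonschema.
# Abridged schema fragment; the real mlos_bench schema has many more fields.
import jsonschema

SCHEMA_FRAGMENT = {
    "$defs": {
        "quantization_bins": {
            "description": "The number of buckets to quantize the range into.",
            "type": "integer",
            "exclusiveMinimum": 1,
        },
    },
    "type": "object",
    "properties": {"quantization_bins": {"$ref": "#/$defs/quantization_bins"}},
}

jsonschema.validate({"quantization_bins": 10}, SCHEMA_FRAGMENT)  # accepted
for bad in ({"quantization_bins": 1}, {"quantization_bins": 0.5}, {"quantization_bins": True}):
    try:
        jsonschema.validate(bad, SCHEMA_FRAGMENT)
    except jsonschema.ValidationError as err:
        print(f"rejected {bad}: {err.message}")  # all three fail validation
```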
56 changes: 40 additions & 16 deletions mlos_bench/mlos_bench/optimizers/convert_configspace.py
@@ -11,21 +11,17 @@
 
 from ConfigSpace import (
     Beta,
-    BetaFloatHyperparameter,
-    BetaIntegerHyperparameter,
     CategoricalHyperparameter,
     Configuration,
     ConfigurationSpace,
     EqualsCondition,
     Float,
     Integer,
     Normal,
-    NormalFloatHyperparameter,
-    NormalIntegerHyperparameter,
     Uniform,
-    UniformFloatHyperparameter,
-    UniformIntegerHyperparameter,
 )
+from ConfigSpace.functional import quantize
+from ConfigSpace.hyperparameters import NumericalHyperparameter
 from ConfigSpace.types import NotSet
 
 from mlos_bench.tunables.tunable import Tunable, TunableValue
@@ -53,6 +49,37 @@ def _normalize_weights(weights: List[float]) -> List[float]:
     return [w / total for w in weights]
 
 
+def _monkey_patch_quantization(hp: NumericalHyperparameter, quantization_bins: int) -> None:
+    """
+    Monkey-patch quantization into the Hyperparameter.
+    Parameters
+    ----------
+    hp : NumericalHyperparameter
+        ConfigSpace hyperparameter to patch.
+    quantization_bins : int
+        Number of bins to quantize the hyperparameter into.
+    """
+    if quantization_bins <= 1:
+        raise ValueError(f"{quantization_bins=} :: must be greater than 1.")
+
+    # Temporary workaround to dropped quantization support in ConfigSpace 1.0
+    # See Also: https://github.com/automl/ConfigSpace/issues/390
+    if not hasattr(hp, "sample_value_mlos_orig"):
+        setattr(hp, "sample_value_mlos_orig", hp.sample_value)
+
+    assert hasattr(hp, "sample_value_mlos_orig")
+    setattr(
+        hp,
+        "sample_value",
+        lambda size=None, **kwargs: quantize(
+            hp.sample_value_mlos_orig(size, **kwargs),
+            bounds=(hp.lower, hp.upper),
+            bins=quantization_bins,
+        ).astype(type(hp.default_value)),
+    )
+
+
 def _tunable_to_configspace(
     tunable: Tunable,
     group_name: Optional[str] = None,
@@ -77,6 +104,7 @@ def _tunable_to_configspace(
     cs : ConfigurationSpace
         A ConfigurationSpace object that corresponds to the Tunable.
     """
+    # pylint: disable=too-complex
     meta: Dict[Hashable, TunableValue] = {"cost": cost}
     if group_name is not None:
         meta["group"] = group_name
@@ -110,20 +138,12 @@ def _tunable_to_configspace(
     elif tunable.distribution is not None:
         raise TypeError(f"Invalid Distribution Type: {tunable.distribution}")
 
-    range_hp: Union[
-        BetaFloatHyperparameter,
-        BetaIntegerHyperparameter,
-        NormalFloatHyperparameter,
-        NormalIntegerHyperparameter,
-        UniformFloatHyperparameter,
-        UniformIntegerHyperparameter,
-    ]
+    range_hp: NumericalHyperparameter
     if tunable.type == "int":
         range_hp = Integer(
             name=tunable.name,
             bounds=(int(tunable.range[0]), int(tunable.range[1])),
             log=bool(tunable.is_log),
-            # TODO: Restore quantization support (#803).
             distribution=distribution,
             default=(
                 int(tunable.default)
@@ -137,7 +157,6 @@ def _tunable_to_configspace(
             name=tunable.name,
             bounds=tunable.range,
             log=bool(tunable.is_log),
-            # TODO: Restore quantization support (#803).
             distribution=distribution,
             default=(
                 float(tunable.default)
@@ -149,6 +168,11 @@ def _tunable_to_configspace(
     else:
         raise TypeError(f"Invalid Parameter Type: {tunable.type}")
 
+    if tunable.quantization_bins:
+        # Temporary workaround to dropped quantization support in ConfigSpace 1.0
+        # See Also: https://github.com/automl/ConfigSpace/issues/390
+        _monkey_patch_quantization(range_hp, tunable.quantization_bins)
+
     if not tunable.special:
         return ConfigurationSpace({tunable.name: range_hp})
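ConfigSpace 1.0 dropped the old `q=` quantization argument, so this file's changes patch `sample_value` on the hyperparameter instance instead of relying on library support. A standalone sketch of the same technique outside mlos_bench (assuming ConfigSpace >= 1.0; `quantize` is the same helper imported above):

```python
# Sketch: quantize a ConfigSpace hyperparameter's samples by wrapping
# sample_value, mirroring _monkey_patch_quantization in the diff above.
from ConfigSpace import Float
from ConfigSpace.functional import quantize

hp = Float("x", bounds=(0.0, 100.0))
orig_sample_value = hp.sample_value  # keep the unpatched method

def quantized_sample_value(size=None, **kwargs):
    # Snap raw samples onto an 11-point grid: 0, 10, ..., 100.
    return quantize(
        orig_sample_value(size, **kwargs),
        bounds=(hp.lower, hp.upper),
        bins=11,
    ).astype(type(hp.default_value))

hp.sample_value = quantized_sample_value  # monkey-patch, as in the PR
print(sorted({float(hp.sample_value()) for _ in range(100)}))  # multiples of 10
```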
6 changes: 3 additions & 3 deletions mlos_bench/mlos_bench/optimizers/grid_search_optimizer.py
@@ -47,7 +47,7 @@ def __init__(
         self._suggested_configs: Set[Tuple[TunableValue, ...]] = set()
 
     def _sanity_check(self) -> None:
-        size = np.prod([tunable.cardinality for (tunable, _group) in self._tunables])
+        size = np.prod([tunable.cardinality or np.inf for (tunable, _group) in self._tunables])
         if size == np.inf:
             raise ValueError(
                 f"Unquantized tunables are not supported for grid search: {self._tunables}"
@@ -79,9 +79,9 @@ def _get_grid(self) -> Tuple[Tuple[str, ...], Dict[Tuple[TunableValue, ...], Non
             for config in generate_grid(
                 self.config_space,
                 {
-                    tunable.name: int(tunable.cardinality)
+                    tunable.name: tunable.cardinality or 0  # mypy wants an int
                     for (tunable, _group) in self._tunables
-                    if tunable.quantization or tunable.type == "int"
+                    if tunable.is_numerical and tunable.cardinality
                 },
             )
         ]
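With `cardinality` now returning `None` for unquantized (continuous) tunables, the `or np.inf` above turns any such tunable into an infinite grid-size product, which the sanity check then rejects. A minimal sketch of that idiom:

```python
# Sketch: how `cardinality or np.inf` flags unquantized tunables.
import numpy as np

cardinalities = [5, None, 11]  # None marks an unquantized float tunable
size = np.prod([c or np.inf for c in cardinalities])
assert size == np.inf  # grid search must reject this tunable set
```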
@@ -6,7 +6,7 @@
             "type": "float",
             "default": 10,
             "range": [0, 10],
-            "quantization": true // <-- this is invalid
+            "quantization_bins": true // <-- this is invalid
         }
     }
 }
@@ -6,7 +6,7 @@
             "type": "float",
             "default": 10,
             "range": [1, 500],
-            "quantization": 0 // <-- should be greater than 0
+            "quantization_bins": 1 // <-- should be greater than 1
         }
     }
 }
@@ -6,7 +6,7 @@
             "type": "int",
             "default": 10,
             "range": [1, 500],
-            "quantization": 1 // <-- should be greater than 1
+            "quantization_bins": 1 // <-- should be greater than 1
         }
     }
 }
@@ -6,7 +6,7 @@
             "type": "int",
             "default": 10,
             "range": [1, 500],
-            "quantization": "yes" // <-- this is invalid
+            "quantization_bins": "yes" // <-- this is invalid
         }
     }
 }
@@ -7,12 +7,12 @@
             "description": "Int",
             "type": "int",
             "default": 10,
-            "range": [1, 500],
+            "range": [0, 500],
             "meta": {"suffix": "MB"},
             "special": [-1],
             "special_weights": [0.1],
             "range_weight": 0.9,
-            "quantization": 50,
+            "quantization_bins": 50,
             "distribution": {
                 "type": "beta",
                 "params": {
@@ -26,12 +26,12 @@
             "description": "Int",
             "type": "int",
             "default": 10,
-            "range": [1, 500],
+            "range": [0, 500],
             "meta": {"suffix": "MB"},
             "special": [-1],
             "special_weights": [0.1],
             "range_weight": 0.9,
-            "quantization": 50,
+            "quantization_bins": 50,
             "distribution": {
                 "type": "normal",
                 "params": {
@@ -48,7 +48,7 @@
             "meta": {"scale": 1000, "prefix": "/proc/var/random/", "base": 2.71828},
             "range": [1.1, 111.1],
             "special": [-1.1],
-            "quantization": 10,
+            "quantization_bins": 11,
             "distribution": {
                 "type": "uniform"
             },
@@ -9,6 +9,7 @@
 import random
 from typing import Dict, List
 
+import numpy as np
 import pytest
 
 from mlos_bench.environments.status import Status
@@ -40,7 +41,7 @@ def grid_search_tunables_config() -> dict:
                 "type": "float",
                 "range": [0, 1],
                 "default": 0.5,
-                "quantization": 0.25,
+                "quantization_bins": 5,
             },
         },
     },
@@ -99,7 +100,9 @@ def test_grid_search_grid(
 ) -> None:
     """Make sure that grid search optimizer initializes and works correctly."""
     # Check the size.
-    expected_grid_size = math.prod(tunable.cardinality for tunable, _group in grid_search_tunables)
+    expected_grid_size = math.prod(
+        tunable.cardinality or np.inf for tunable, _group in grid_search_tunables
+    )
     assert expected_grid_size > len(grid_search_tunables)
     assert len(grid_search_tunables_grid) == expected_grid_size
     # Check for specific example configs inclusion.
1 change: 1 addition & 0 deletions mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py
@@ -62,6 +62,7 @@
             "type": "int",
             "default": 2000000,
             "range": [0, 1000000000],
+            "quantization_bins": 11,
             "log": false
         }
     }
31 changes: 21 additions & 10 deletions mlos_bench/mlos_bench/tests/tunables/test_tunables_size_props.py
@@ -4,7 +4,6 @@
 #
 """Unit tests for checking tunable size properties."""
 
-import numpy as np
 import pytest
 
 from mlos_bench.tunables.tunable import Tunable
@@ -23,9 +22,9 @@ def test_tunable_int_size_props() -> None:
             "default": 3,
         },
     )
-    assert tunable.span == 4
-    assert tunable.cardinality == 5
     expected = [1, 2, 3, 4, 5]
+    assert tunable.span == 4
+    assert tunable.cardinality == len(expected)
     assert list(tunable.quantized_values or []) == expected
     assert list(tunable.values or []) == expected

@@ -41,7 +40,7 @@ def test_tunable_float_size_props() -> None:
         },
     )
     assert tunable.span == 3.5
-    assert tunable.cardinality == np.inf
+    assert tunable.cardinality is None
     assert tunable.quantized_values is None
     assert tunable.values is None

@@ -68,11 +67,17 @@ def test_tunable_quantized_int_size_props() -> None:
     """Test quantized tunable int size properties."""
     tunable = Tunable(
         name="test",
-        config={"type": "int", "range": [100, 1000], "default": 100, "quantization": 100},
+        config={
+            "type": "int",
+            "range": [100, 1000],
+            "default": 100,
+            "quantization_bins": 10,
+        },
     )
-    assert tunable.span == 900
-    assert tunable.cardinality == 10
     expected = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]
+    assert tunable.span == 900
+    assert tunable.cardinality == len(expected)
+    assert tunable.quantization_bins == len(expected)
     assert list(tunable.quantized_values or []) == expected
     assert list(tunable.values or []) == expected

@@ -81,10 +86,16 @@ def test_tunable_quantized_float_size_props() -> None:
     """Test quantized tunable float size properties."""
    tunable = Tunable(
         name="test",
-        config={"type": "float", "range": [0, 1], "default": 0, "quantization": 0.1},
+        config={
+            "type": "float",
+            "range": [0, 1],
+            "default": 0,
+            "quantization_bins": 11,
+        },
     )
-    assert tunable.span == 1
-    assert tunable.cardinality == 11
     expected = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
+    assert tunable.span == 1
+    assert tunable.cardinality == len(expected)
+    assert tunable.quantization_bins == len(expected)
     assert pytest.approx(list(tunable.quantized_values or []), 0.0001) == expected
     assert pytest.approx(list(tunable.values or []), 0.0001) == expected
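These tests also capture the semantic shift in this PR: the old `quantization` value was a step size (for example `0.1`), while `quantization_bins` counts grid points over an inclusive range, so `N` bins imply a step of `(high - low) / (N - 1)`. A quick check of that arithmetic:

```python
# Sketch: bins count grid points, so 11 bins over [0, 1] step by 0.1.
import numpy as np

low, high, bins = 0.0, 1.0, 11
grid = np.linspace(low, high, num=bins)  # inclusive endpoints
assert len(grid) == bins
assert np.isclose(grid[1] - grid[0], (high - low) / (bins - 1))  # step 0.1
```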