From 8f59f5a062d57f0f738bc6a60cfc91cf110bacb1 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 17:17:22 -0800 Subject: [PATCH 01/21] add weights support to teh Tunable and the JSON schema --- .../tunables/tunable-params-schema.json | 18 +++++++++++++++++ .../good/full/full-tunable-params-test.jsonc | 7 ++++--- mlos_bench/mlos_bench/tests/conftest.py | 6 ++++-- mlos_bench/mlos_bench/tunables/tunable.py | 20 +++++++++++++++++++ 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 1156c0bb6f4..0e6b71a86f0 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -35,6 +35,12 @@ }, "minItems": 1, "uniqueItems": true + }, + "weights": { + "type": "array", + "items": { + "type": "float" + } } }, "required": ["type", "default", "values"], @@ -73,6 +79,12 @@ "type": "integer" }, "uniqueItems": true + }, + "weights": { + "type": "array", + "items": { + "type": "float" + } } }, "required": ["type", "default", "range"], @@ -111,6 +123,12 @@ "type": "number" }, "uniqueItems": true + }, + "weights": { + "type": "array", + "items": { + "type": "float" + } } }, "required": ["type", "default", "range"], diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc index 5dab5fbe26e..1086af9fbaf 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -9,7 +9,8 @@ "default": 10, "range": [1, 500], "meta": {"suffix": "MB"}, - "special": 
[-1] + "special": [-1], + "weights": [0.1, 0.9] }, "float": { "description": "Float", @@ -44,7 +45,6 @@ "values": [true, 1, "true"], "default": true } - } }, "covariant_group_name-2": { @@ -75,7 +75,8 @@ "type": "categorical", "default": "yes", "meta": {"quote": true}, - "values": ["yes", "no"] + "values": ["yes", "no"], + "weights": [50, 50] } } } diff --git a/mlos_bench/mlos_bench/tests/conftest.py b/mlos_bench/mlos_bench/tests/conftest.py index 9f646ca8f92..5db9543bac0 100644 --- a/mlos_bench/mlos_bench/tests/conftest.py +++ b/mlos_bench/mlos_bench/tests/conftest.py @@ -48,7 +48,8 @@ "description": "Idling method", "type": "categorical", "default": "halt", - "values": ["halt", "mwait", "noidle"] + "values": ["halt", "mwait", "noidle"], + "weights": [1, 1, 2] } } }, @@ -60,7 +61,8 @@ "type": "int", "default": -1, "range": [0, 500000], - "special": [-1, 0] + "special": [-1, 0], + "weights": [0.25, 0.25, 0.5] }, "kernel_sched_latency_ns": { "description": "Initial value for the scheduler period", diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index 8992e9d96b3..7fb251d11ff 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -33,6 +33,7 @@ class TunableDict(TypedDict, total=False): values: Optional[List[Optional[str]]] range: Optional[Union[Sequence[int], Sequence[float]]] special: Optional[Union[List[int], List[float]]] + weights: Optional[List[float]] meta: Dict[str, Any] @@ -79,6 +80,7 @@ def __init__(self, name: str, config: TunableDict): config_range = (config_range[0], config_range[1]) self._range = config_range self._special: Union[List[int], List[float]] = config.get("special") or [] + self._weights: List[float] = config.get("weights") or [] self._current_value = None self._sanity_check() self.value = self._default @@ -96,11 +98,16 @@ def _sanity_check(self) -> None: raise ValueError(f"Values must be unique for the categorical type tunable {self}") if 
self._special: raise ValueError(f"Categorical tunable cannot have special values: {self}") + if self._weights and len(self._weights) != len(self._values): + raise ValueError(f"Must specify weights for all values: {self}") elif self.is_numerical: if self._values is not None: raise ValueError(f"Values must be None for the numerical type tunable {self}") if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: raise ValueError(f"Invalid range for tunable {self}: {self._range}") + if self._weights and len(self._weights) != len(self._special) + 1: + raise ValueError("Must specify weights for all special values plus" + + f" one weight for the regular range: {self}") else: raise ValueError(f"Invalid parameter type for tunable {self}: {self._type}") if not self.is_valid(self.default): @@ -369,6 +376,19 @@ def is_special(self) -> bool: """ return self.value in self._special + @property + def weights(self) -> Optional[List[float]]: + """ + Get the weights of the categories or special values of the tunable. + Return None if there are none. + + Returns + ------- + weights : [float] + A list of weights or None. 
+ """ + return self._weights + @property def type(self) -> str: """ From 996cc9ac05736127ea46edcace642faacfd649e6 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 17:21:02 -0800 Subject: [PATCH 02/21] typo: use "number" instead of "float" in JSON schema --- .../config/schemas/tunables/tunable-params-schema.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 0e6b71a86f0..f9cf32ed906 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -39,7 +39,7 @@ "weights": { "type": "array", "items": { - "type": "float" + "type": "number" } } }, @@ -83,7 +83,7 @@ "weights": { "type": "array", "items": { - "type": "float" + "type": "number" } } }, @@ -127,7 +127,7 @@ "weights": { "type": "array", "items": { - "type": "float" + "type": "number" } } }, From afc41e22189a5f9722ac4c03570d960d7b12c155 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 17:48:54 -0800 Subject: [PATCH 03/21] add unit tests for weighted parameters of the tunables; check for negative weights --- .../tests/tunables/tunable_definition_test.py | 132 +++++++++++++++++- mlos_bench/mlos_bench/tunables/tunable.py | 16 ++- 2 files changed, 142 insertions(+), 6 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 5a100b59d1a..7ffdb4c5141 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -28,7 +28,58 @@ def test_categorical_required_params() -> None: json_config = """ { "type": "categorical", - "values_missing": ["foo", "bar", "foo"], + "values_missing": ["foo", "bar", "baz"], + 
"default": "foo" + } + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +def test_categorical_weights() -> None: + """ + Instantiate a categorical tunable with weights. + """ + json_config = """ + { + "type": "categorical", + "values": ["foo", "bar", "baz"], + "weights": [25, 25, 50], + "default": "foo" + } + """ + config = json.loads(json_config) + tunable = Tunable(name='test', config=config) + assert tunable.weights == [25, 25, 50] + + +def test_categorical_weights_wrong_count() -> None: + """ + Try to instantiate a categorical tunable with incorrect number of weights. + """ + json_config = """ + { + "type": "categorical", + "values": ["foo", "bar", "baz"], + "weights": [50, 50], + "default": "foo" + } + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +def test_categorical_weights_wrong_values() -> None: + """ + Try to instantiate a categorical tunable with invalid weights. + """ + json_config = """ + { + "type": "categorical", + "values": ["foo", "bar", "baz"], + "weights": [-1, 50, 50], "default": "foo" } """ @@ -174,6 +225,85 @@ def test_numerical_tunable_reversed_range(tunable_type: str) -> None: Tunable(name=f'test_{tunable_type}', config=config) +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights(tunable_type: str) -> None: + """ + Instantiate a numerical tunable with weighted special values. 
+ """ + json_config = f""" + { + "type": "{tunable_type}", + "range": [0, 100], + "special": [0], + "weights": [0.1, 0.9], + "default": 0 + } + """ + config = json.loads(json_config) + tunable = Tunable(name='test', config=config) + assert tunable.special == [0] + assert tunable.weights == [0.1, 0.9] + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights_non_normalized(tunable_type: str) -> None: + """ + Instantiate a numerical tunable with non-normalized weights + of the special values. + """ + json_config = f""" + { + "type": "{tunable_type}", + "range": [0, 100], + "special": [-1, 0], + "weights": [10, 10, 80], + "default": 0 + } + """ + config = json.loads(json_config) + tunable = Tunable(name='test', config=config) + assert tunable.special == [-1, 0] + assert tunable.weights == [10, 10, 80] + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights_wrong_count(tunable_type: str) -> None: + """ + Try to instantiate a numerical tunable with incorrect number of weights. + """ + json_config = f""" + { + "type": "{tunable_type}", + "range": [0, 100], + "special": [0], + "weights": [0.1, 0.1, 0.8], + "default": 0 + } + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights_wrong_values(tunable_type: str) -> None: + """ + Try to instantiate a numerical tunable with incorrect number of weights. + """ + json_config = f""" + { + "type": "{tunable_type}", + "range": [0, 100], + "special": [0], + "weights": [-1, 10], + "default": 0 + } + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + def test_bad_type() -> None: """ Disallow bad types. 
diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index 7fb251d11ff..603962891c9 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -98,16 +98,22 @@ def _sanity_check(self) -> None: raise ValueError(f"Values must be unique for the categorical type tunable {self}") if self._special: raise ValueError(f"Categorical tunable cannot have special values: {self}") - if self._weights and len(self._weights) != len(self._values): - raise ValueError(f"Must specify weights for all values: {self}") + if self._weights: + if len(self._weights) != len(self._values): + raise ValueError(f"Must specify weights for all values: {self}") + if any(w < 0 for w in self._weights): + raise ValueError(f"All weights must be non-negative: {self}") elif self.is_numerical: if self._values is not None: raise ValueError(f"Values must be None for the numerical type tunable {self}") if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: raise ValueError(f"Invalid range for tunable {self}: {self._range}") - if self._weights and len(self._weights) != len(self._special) + 1: - raise ValueError("Must specify weights for all special values plus" + - f" one weight for the regular range: {self}") + if self._weights: + if len(self._weights) != len(self._special) + 1: + raise ValueError("Must specify weights for all special values plus" + + f" one weight for the regular range: {self}") + if any(w < 0 for w in self._weights): + raise ValueError(f"All weights must be non-negative: {self}") else: raise ValueError(f"Invalid parameter type for tunable {self}: {self._type}") if not self.is_valid(self.default): From cf3d19ec5b30b66d06845b4384f14434fdb37c20 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 17:54:01 -0800 Subject: [PATCH 04/21] bugfix: incorrect formatting of intyerpolated JSON strings in the unit tests --- .../tests/tunables/tunable_definition_test.py 
| 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 7ffdb4c5141..8d446313ead 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -231,13 +231,13 @@ def test_numerical_weights(tunable_type: str) -> None: Instantiate a numerical tunable with weighted special values. """ json_config = f""" - { + {{ "type": "{tunable_type}", "range": [0, 100], "special": [0], "weights": [0.1, 0.9], "default": 0 - } + }} """ config = json.loads(json_config) tunable = Tunable(name='test', config=config) @@ -252,13 +252,13 @@ def test_numerical_weights_non_normalized(tunable_type: str) -> None: of the special values. """ json_config = f""" - { + {{ "type": "{tunable_type}", "range": [0, 100], "special": [-1, 0], "weights": [10, 10, 80], "default": 0 - } + }} """ config = json.loads(json_config) tunable = Tunable(name='test', config=config) @@ -272,13 +272,13 @@ def test_numerical_weights_wrong_count(tunable_type: str) -> None: Try to instantiate a numerical tunable with incorrect number of weights. """ json_config = f""" - { + {{ "type": "{tunable_type}", "range": [0, 100], "special": [0], "weights": [0.1, 0.1, 0.8], "default": 0 - } + }} """ config = json.loads(json_config) with pytest.raises(ValueError): @@ -291,13 +291,13 @@ def test_numerical_weights_wrong_values(tunable_type: str) -> None: Try to instantiate a numerical tunable with incorrect number of weights. 
""" json_config = f""" - { + {{ "type": "{tunable_type}", "range": [0, 100], "special": [0], "weights": [-1, 10], "default": 0 - } + }} """ config = json.loads(json_config) with pytest.raises(ValueError): From e1439284f45a876e7ddd9b7aca4ecd31b1226d3a Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 17:57:03 -0800 Subject: [PATCH 05/21] check for zero weights (this is ok) --- .../mlos_bench/tests/tunables/tunable_definition_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 8d446313ead..1e8422b1186 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -256,14 +256,14 @@ def test_numerical_weights_non_normalized(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [-1, 0], - "weights": [10, 10, 80], + "weights": [0, 10, 90], "default": 0 }} """ config = json.loads(json_config) tunable = Tunable(name='test', config=config) assert tunable.special == [-1, 0] - assert tunable.weights == [10, 10, 80] + assert tunable.weights == [0, 10, 90] # Zero weights are ok @pytest.mark.parametrize("tunable_type", ["int", "float"]) From 450368916c1dc6e9a1f9563388df96ef07adeaf5 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 18:24:52 -0800 Subject: [PATCH 06/21] pass weights from Tunable to ConfigSpace hyperparameters --- .../optimizers/convert_configspace.py | 45 +++++++++++++++---- .../tests/tunables/tunable_definition_test.py | 18 ++++++++ 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/convert_configspace.py b/mlos_bench/mlos_bench/optimizers/convert_configspace.py index a50be040034..f6059fa837b 100644 --- a/mlos_bench/mlos_bench/optimizers/convert_configspace.py +++ 
b/mlos_bench/mlos_bench/optimizers/convert_configspace.py @@ -8,7 +8,7 @@ import logging -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple from ConfigSpace import ( CategoricalHyperparameter, @@ -35,6 +35,14 @@ class TunableValueKind: RANGE = "range" +def _normalize_weights(weights: List[float]) -> List[float]: + """ + Helper function for normalizing weights to probabilities. + """ + total = sum(weights) + return [w / total for w in weights] + + def _tunable_to_configspace( tunable: Tunable, group_name: Optional[str] = None, cost: int = 0) -> ConfigurationSpace: """ @@ -61,8 +69,11 @@ def _tunable_to_configspace( if tunable.type == "categorical": return ConfigurationSpace({ tunable.name: CategoricalHyperparameter( - name=tunable.name, choices=tunable.categories, - default_value=tunable.default, meta=meta) + name=tunable.name, + choices=tunable.categories, + weights=_normalize_weights(tunable.weights) if tunable.weights else None, + default_value=tunable.default, + meta=meta) }) if tunable.type == "int": @@ -75,28 +86,44 @@ def _tunable_to_configspace( if not tunable.special: return ConfigurationSpace({ tunable.name: hp_type( - name=tunable.name, lower=tunable.range[0], upper=tunable.range[1], + name=tunable.name, + lower=tunable.range[0], + upper=tunable.range[1], default_value=tunable.default if tunable.in_range(tunable.default) else None, meta=meta) }) + # Normalize the weights and use the last one for regular values in the range. + special_weights: Optional[List[float]] = None + switch_weights = [0.5, 0.5] # FLAML requires uniform weights. + if tunable.weights: + special_weights = _normalize_weights(tunable.weights[:-1]) + switch_weights = _normalize_weights([sum(tunable.weights[:-1]), tunable.weights[-1]]) + # Create three hyperparameters: one for regular values, # one for special values, and one to choose between the two. 
(special_name, type_name) = special_param_names(tunable.name) conf_space = ConfigurationSpace({ tunable.name: hp_type( - name=tunable.name, lower=tunable.range[0], upper=tunable.range[1], + name=tunable.name, + lower=tunable.range[0], + upper=tunable.range[1], default_value=tunable.default if tunable.in_range(tunable.default) else None, - meta=meta), + meta=meta + ), special_name: CategoricalHyperparameter( - name=special_name, choices=tunable.special, + name=special_name, + choices=tunable.special, + weights=special_weights, default_value=tunable.default if tunable.default in tunable.special else None, - meta=meta), + meta=meta + ), type_name: CategoricalHyperparameter( name=type_name, choices=[TunableValueKind.SPECIAL, TunableValueKind.RANGE], + weights=switch_weights, default_value=TunableValueKind.SPECIAL, - weights=[0.5, 0.5]), # TODO: Make weights configurable; FLAML requires uniform weights. + ), }) conf_space.add_condition(EqualsCondition( conf_space[special_name], conf_space[type_name], TunableValueKind.SPECIAL)) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 1e8422b1186..89eed479f5b 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -245,6 +245,24 @@ def test_numerical_weights(tunable_type: str) -> None: assert tunable.weights == [0.1, 0.9] +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights_no_specials(tunable_type: str) -> None: + """ + Raise an error if weights are specified but no special values. 
+ """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "weights": [0.1, 0.9], + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + @pytest.mark.parametrize("tunable_type", ["int", "float"]) def test_numerical_weights_non_normalized(tunable_type: str) -> None: """ From 4341df14eacfbe374a8e4a126941911d888dcbcb Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 18:54:46 -0800 Subject: [PATCH 07/21] update the weights in unit tests to make FLAML optimizer happy --- mlos_bench/mlos_bench/tests/conftest.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/conftest.py b/mlos_bench/mlos_bench/tests/conftest.py index 5db9543bac0..138eaf6b5b2 100644 --- a/mlos_bench/mlos_bench/tests/conftest.py +++ b/mlos_bench/mlos_bench/tests/conftest.py @@ -27,7 +27,6 @@ # -- Ignore pylint complaints about pytest references to # `tunable_groups` fixture as both a function and a parameter. - TUNABLE_GROUPS_JSON = """ { "provision": { @@ -49,7 +48,7 @@ "type": "categorical", "default": "halt", "values": ["halt", "mwait", "noidle"], - "weights": [1, 1, 2] + "weights": [33, 33, 33] // FLAML requires uniform weights } } }, @@ -62,6 +61,8 @@ "default": -1, "range": [0, 500000], "special": [-1, 0], + // FLAML requires uniform weights, separately for + // specials and switching between specials and range. 
"weights": [0.25, 0.25, 0.5] }, "kernel_sched_latency_ns": { From 845c70006b072a8fe0d49145252397923d63a1dd Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 18 Jan 2024 19:12:47 -0800 Subject: [PATCH 08/21] specify probabilities in the config space unit tests --- .../mlos_bench/tests/tunables/tunable_to_configspace_test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py index 0cc7bd0f994..42b24dd51e2 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_to_configspace_test.py @@ -51,12 +51,14 @@ def configuration_space() -> ConfigurationSpace: "kernel_sched_latency_ns": (0, 1000000000), }) + # NOTE: FLAML requires distribution to be uniform spaces["vmSize"].default_value = "Standard_B4ms" spaces["idle"].default_value = "halt" spaces["kernel_sched_migration_cost_ns"].default_value = 250000 spaces[kernel_sched_migration_cost_ns_special].default_value = -1 + spaces[kernel_sched_migration_cost_ns_special].probabilities = (0.5, 0.5) spaces[kernel_sched_migration_cost_ns_type].default_value = TunableValueKind.SPECIAL - spaces[kernel_sched_migration_cost_ns_type].probabilities = (0.5, 0.5) # FLAML requires distribution to be uniform + spaces[kernel_sched_migration_cost_ns_type].probabilities = (0.5, 0.5) spaces["kernel_sched_latency_ns"].default_value = 2000000 spaces.add_condition(EqualsCondition( From b30950c32e011a9886c3f6292f3805e27de0053d Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 15:21:58 -0800 Subject: [PATCH 09/21] add a range_weight property --- .../optimizers/convert_configspace.py | 6 ++-- mlos_bench/mlos_bench/tunables/tunable.py | 28 ++++++++++++++++--- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/convert_configspace.py 
b/mlos_bench/mlos_bench/optimizers/convert_configspace.py index f6059fa837b..db16fc3d44d 100644 --- a/mlos_bench/mlos_bench/optimizers/convert_configspace.py +++ b/mlos_bench/mlos_bench/optimizers/convert_configspace.py @@ -93,12 +93,12 @@ def _tunable_to_configspace( meta=meta) }) - # Normalize the weights and use the last one for regular values in the range. + # Compute the probabilities of switching between regular and special values. special_weights: Optional[List[float]] = None switch_weights = [0.5, 0.5] # FLAML requires uniform weights. if tunable.weights: - special_weights = _normalize_weights(tunable.weights[:-1]) - switch_weights = _normalize_weights([sum(tunable.weights[:-1]), tunable.weights[-1]]) + special_weights = _normalize_weights(tunable.weights) + switch_weights = _normalize_weights([sum(tunable.weights), tunable.range_weight or 0]) # Create three hyperparameters: one for regular values, # one for special values, and one to choose between the two. diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index 603962891c9..f01507cbaee 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -34,6 +34,7 @@ class TunableDict(TypedDict, total=False): range: Optional[Union[Sequence[int], Sequence[float]]] special: Optional[Union[List[int], List[float]]] weights: Optional[List[float]] + range_weight: Optional[float] meta: Dict[str, Any] @@ -81,6 +82,7 @@ def __init__(self, name: str, config: TunableDict): self._range = config_range self._special: Union[List[int], List[float]] = config.get("special") or [] self._weights: List[float] = config.get("weights") or [] + self._range_weight: Optional[float] = config.get("range_weight") self._current_value = None self._sanity_check() self.value = self._default @@ -98,6 +100,8 @@ def _sanity_check(self) -> None: raise ValueError(f"Values must be unique for the categorical type tunable {self}") if self._special: raise 
ValueError(f"Categorical tunable cannot have special values: {self}") + if self._range_weight is not None: + raise ValueError(f"Categorical tunable cannot have range_weight: {self}") if self._weights: if len(self._weights) != len(self._values): raise ValueError(f"Must specify weights for all values: {self}") @@ -109,11 +113,14 @@ def _sanity_check(self) -> None: if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: raise ValueError(f"Invalid range for tunable {self}: {self._range}") if self._weights: - if len(self._weights) != len(self._special) + 1: - raise ValueError("Must specify weights for all special values plus" + - f" one weight for the regular range: {self}") - if any(w < 0 for w in self._weights): + if self._range_weight is None: + raise ValueError(f"Must specify weight for the range: {self}") + if len(self._weights) != len(self._special): + raise ValueError("Must specify weights for all special values {self}") + if any(w < 0 for w in self._weights + [self._range_weight]): raise ValueError(f"All weights must be non-negative: {self}") + elif self._range_weight is not None: + raise ValueError(f"Must specify both weights and range_weight or none: {self}") else: raise ValueError(f"Invalid parameter type for tunable {self}: {self._type}") if not self.is_valid(self.default): @@ -395,6 +402,19 @@ def weights(self) -> Optional[List[float]]: """ return self._weights + @property + def range_weight(self) -> Optional[float]: + """ + Get weight of the range of the numeric tunable. + Return None if there are no weights or a tunable is categorical. + + Returns + ------- + weight : float + Weight of the range or None. 
+ """ + return self._range_weight + @property def type(self) -> str: """ From 0b6d21aa55f7f9e889f40b289fe203ec0da64872 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 15:41:35 -0800 Subject: [PATCH 10/21] move range weight to a special parameter --- .../tunables/tunable-params-schema.json | 6 ++ .../good/full/full-tunable-params-test.jsonc | 3 +- .../tests/tunable_groups_fixtures.py | 3 +- .../tests/tunables/tunable_definition_test.py | 72 +++++++++++++++++-- 4 files changed, 77 insertions(+), 7 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index f9cf32ed906..56289f4e73f 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -85,6 +85,9 @@ "items": { "type": "number" } + }, + "range_weight": { + "type": "number" } }, "required": ["type", "default", "range"], @@ -129,6 +132,9 @@ "items": { "type": "number" } + }, + "range_weight": { + "type": "number" } }, "required": ["type", "default", "range"], diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc index 1086af9fbaf..29b98fa76b2 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -10,7 +10,8 @@ "range": [1, 500], "meta": {"suffix": "MB"}, "special": [-1], - "weights": [0.1, 0.9] + "weights": [0.1], + "range_weight": 0.9 }, "float": { "description": "Float", diff --git a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py index 
b646fd20f2e..6f2027373cf 100644 --- a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py +++ b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py @@ -54,7 +54,8 @@ "special": [-1, 0], // FLAML requires uniform weights, separately for // specials and switching between specials and range. - "weights": [0.25, 0.25, 0.5] + "weights": [0.25, 0.25], + "range_weight": 0.5 }, "kernel_sched_latency_ns": { "description": "Initial value for the scheduler period", diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 89eed479f5b..2bd3a5d1eb7 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -235,14 +235,16 @@ def test_numerical_weights(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [0], - "weights": [0.1, 0.9], + "weights": [0.1], + "range_weight": 0.9, "default": 0 }} """ config = json.loads(json_config) tunable = Tunable(name='test', config=config) assert tunable.special == [0] - assert tunable.weights == [0.1, 0.9] + assert tunable.weights == [0.1] + assert tunable.range_weight == 0.9 @pytest.mark.parametrize("tunable_type", ["int", "float"]) @@ -274,14 +276,16 @@ def test_numerical_weights_non_normalized(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [-1, 0], - "weights": [0, 10, 90], + "weights": [0, 10], + "range_weight": 90, "default": 0 }} """ config = json.loads(json_config) tunable = Tunable(name='test', config=config) assert tunable.special == [-1, 0] - assert tunable.weights == [0, 10, 90] # Zero weights are ok + assert tunable.weights == [0, 10] # Zero weights are ok + assert tunable.range_weight == 90 @pytest.mark.parametrize("tunable_type", ["int", "float"]) @@ -295,6 +299,63 @@ def test_numerical_weights_wrong_count(tunable_type: str) -> None: "range": [0, 100], "special": [0], "weights": 
[0.1, 0.1, 0.8], + "range_weight": 0.1, + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_weights_no_range_weight(tunable_type: str) -> None: + """ + Try to instantiate a numerical tunable with weights but no range_weight. + """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "special": [0, -1], + "weights": [0.1, 0.2], + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_range_weight_no_weights(tunable_type: str) -> None: + """ + Try to instantiate a numerical tunable with specials but no range_weight. + """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "special": [0, -1], + "range_weight": 0.3, + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_range_weight_no_specials(tunable_type: str) -> None: + """ + Try to instantiate a numerical tunable with specials but no range_weight. 
+ """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "range_weight": 0.3, "default": 0 }} """ @@ -313,7 +374,8 @@ def test_numerical_weights_wrong_values(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [0], - "weights": [-1, 10], + "weights": [-1], + "range_weight": 10, "default": 0 }} """ From fdeaad03e7ecaf29a55f87d682f75c71773f595c Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 15:52:03 -0800 Subject: [PATCH 11/21] minor fix for pylint --- mlos_bench/mlos_bench/optimizers/convert_configspace.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/optimizers/convert_configspace.py b/mlos_bench/mlos_bench/optimizers/convert_configspace.py index db16fc3d44d..60a739070aa 100644 --- a/mlos_bench/mlos_bench/optimizers/convert_configspace.py +++ b/mlos_bench/mlos_bench/optimizers/convert_configspace.py @@ -96,9 +96,9 @@ def _tunable_to_configspace( # Compute the probabilities of switching between regular and special values. special_weights: Optional[List[float]] = None switch_weights = [0.5, 0.5] # FLAML requires uniform weights. - if tunable.weights: + if tunable.weights and tunable.range_weight is not None: special_weights = _normalize_weights(tunable.weights) - switch_weights = _normalize_weights([sum(tunable.weights), tunable.range_weight or 0]) + switch_weights = _normalize_weights([sum(tunable.weights), tunable.range_weight]) # Create three hyperparameters: one for regular values, # one for special values, and one to choose between the two. 
From 61143e39110cfebed6e30eb9710fdbfc6dfd9b16 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 16:32:19 -0800 Subject: [PATCH 12/21] add quantization points and log scale to the tunables and pass this data all the way to ConfigSpace --- .../tunables/tunable-params-schema.json | 12 ++++++ .../optimizers/convert_configspace.py | 4 ++ .../good/full/full-tunable-params-test.jsonc | 8 +++- mlos_bench/mlos_bench/tunables/tunable.py | 39 +++++++++++++++++++ 4 files changed, 61 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 56289f4e73f..06dc0cafc4f 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -73,6 +73,12 @@ "minItems": 2, "maxItems": 2 }, + "quantization": { + "type": "number" + }, + "log": { + "type": "boolean" + }, "special": { "type": "array", "items": { @@ -120,6 +126,12 @@ "minItems": 2, "maxItems": 2 }, + "quantization": { + "type": "number" + }, + "log": { + "type": "boolean" + }, "special": { "type": "array", "items": { diff --git a/mlos_bench/mlos_bench/optimizers/convert_configspace.py b/mlos_bench/mlos_bench/optimizers/convert_configspace.py index 60a739070aa..55c7fa74e01 100644 --- a/mlos_bench/mlos_bench/optimizers/convert_configspace.py +++ b/mlos_bench/mlos_bench/optimizers/convert_configspace.py @@ -89,6 +89,8 @@ def _tunable_to_configspace( name=tunable.name, lower=tunable.range[0], upper=tunable.range[1], + log=tunable.is_log, + q=tunable.quantization, default_value=tunable.default if tunable.in_range(tunable.default) else None, meta=meta) }) @@ -108,6 +110,8 @@ def _tunable_to_configspace( name=tunable.name, lower=tunable.range[0], upper=tunable.range[1], + log=tunable.is_log, + q=tunable.quantization, default_value=tunable.default if 
tunable.in_range(tunable.default) else None, meta=meta ), diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc index 29b98fa76b2..3ae7124b96b 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -11,7 +11,9 @@ "meta": {"suffix": "MB"}, "special": [-1], "weights": [0.1], - "range_weight": 0.9 + "range_weight": 0.9, + "quantization": 50, + "log": true }, "float": { "description": "Float", @@ -19,7 +21,9 @@ "default": 10.1, "meta": {"scale": 1000, "prefix": "/proc/var/random/", "base": 2.71828}, "range": [1.1, 111.1], - "special": [-1.1] + "special": [-1.1], + "quantization": 10, + "log": false }, "cat": { "description": "Cat", diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index f01507cbaee..60288d8aa47 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -32,6 +32,8 @@ class TunableDict(TypedDict, total=False): default: TunableValue values: Optional[List[Optional[str]]] range: Optional[Union[Sequence[int], Sequence[float]]] + quantization: Optional[int] + log: Optional[bool] special: Optional[Union[List[int], List[float]]] weights: Optional[List[float]] range_weight: Optional[float] @@ -75,6 +77,8 @@ def __init__(self, name: str, config: TunableDict): self._values = [str(v) if v is not None else v for v in self._values] self._meta: Dict[str, Any] = config.get("meta", {}) self._range: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None + self._quantization: Optional[int] = config.get("quantization") + self._log: Optional[bool] = config.get("log") config_range = config.get("range") if 
config_range is not None: assert len(config_range) == 2, f"Invalid range: {config_range}" @@ -102,6 +106,10 @@ def _sanity_check(self) -> None: raise ValueError(f"Categorical tunable cannot have special values: {self}") if self._range_weight is not None: raise ValueError(f"Categorical tunable cannot have range_weight: {self}") + if self._log is not None: + raise ValueError(f"Categorical tunable cannot have log parameter: {self}") + if self._quantization is not None: + raise ValueError(f"Categorical tunable cannot have quantization parameter: {self}") if self._weights: if len(self._weights) != len(self._values): raise ValueError(f"Must specify weights for all values: {self}") @@ -112,6 +120,8 @@ def _sanity_check(self) -> None: raise ValueError(f"Values must be None for the numerical type tunable {self}") if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: raise ValueError(f"Invalid range for tunable {self}: {self._range}") + if self._quantization is not None and self._quantization <= 1: + raise ValueError(f"Number of quantization points is <= 1: {self}") if self._weights: if self._range_weight is None: raise ValueError(f"Must specify weight for the range: {self}") @@ -413,6 +423,9 @@ def range_weight(self) -> Optional[float]: weight : float Weight of the range or None. """ + assert self.is_numerical + assert self._special + assert self._weights return self._range_weight @property @@ -480,6 +493,32 @@ def range(self) -> Union[Tuple[int, int], Tuple[float, float]]: assert self._range is not None return self._range + @property + def quantization(self) -> Optional[int]: + """ + Get the number of quantization points, if specified. + + Returns + ------- + quantization : int + Number of quantization points or None. + """ + assert self.is_numerical + return self._quantization + + @property + def is_log(self) -> Optional[bool]: + """ + Check if numeric tunable is log scale. 
+ + Returns + ------- + log : bool + True if numeric tunable is log scale, False if linear. + """ + assert self.is_numerical + return self._log + @property def categories(self) -> List[Optional[str]]: """ From fd072e98c953229d6f38314a9656d3a9aa3dc459 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 16:33:25 -0800 Subject: [PATCH 13/21] add new test cases --- .../bad/invalid/tunable-params-float-bad-log.jsonc | 13 +++++++++++++ ...tunable-params-float-bad-quantization-type.jsonc | 13 +++++++++++++ .../bad/invalid/tunable-params-int-bad-log.jsonc | 13 +++++++++++++ .../tunable-params-int-bad-quantization.jsonc | 13 +++++++++++++ 4 files changed, 52 insertions(+) create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-log.jsonc create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-quantization-type.jsonc create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-log.jsonc create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-log.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-log.jsonc new file mode 100644 index 00000000000..4a498ea5ffe --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-log.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "float": { + "type": "float", + "default": 10, + "range": [0, 10], + "log": "yes" // <-- this is invalid + } + } + } +} diff --git 
a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-quantization-type.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-quantization-type.jsonc new file mode 100644 index 00000000000..0ed88844574 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-float-bad-quantization-type.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "float": { + "type": "float", + "default": 10, + "range": [0, 10], + "quantization": true // <-- this is invalid + } + } + } +} diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-log.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-log.jsonc new file mode 100644 index 00000000000..c40a672a020 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-log.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "int": { + "type": "int", + "default": 10, + "range": [1, 500], + "log": 1 // <-- this is invalid + } + } + } +} diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc new file mode 100644 index 00000000000..1b7af4ffcd0 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "int": { + "type": "int", + "default": 10, + "range": [1, 500], + "quantization": "yes" // <-- this is invalid + } + } + } +} From 
8500d6734a709e93eb7b7c1cf3ec890d46ce9369 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 16:35:00 -0800 Subject: [PATCH 14/21] add log to some tunables in the unit tests --- mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py index 6f2027373cf..a368ed66fcd 100644 --- a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py +++ b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py @@ -55,13 +55,15 @@ // FLAML requires uniform weights, separately for // specials and switching between specials and range. "weights": [0.25, 0.25], - "range_weight": 0.5 + "range_weight": 0.5, + "log": false }, "kernel_sched_latency_ns": { "description": "Initial value for the scheduler period", "type": "int", "default": 2000000, - "range": [0, 1000000000] + "range": [0, 1000000000], + "log": false } } } From 4bcd84bab3b6de12fd26f955de0330bdbc8d8d60 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Thu, 1 Feb 2024 16:45:25 -0800 Subject: [PATCH 15/21] add more unit tests for new tunable's properties --- .../tests/tunables/tunable_definition_test.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 2bd3a5d1eb7..8914a89c984 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -247,6 +247,43 @@ def test_numerical_weights(tunable_type: str) -> None: assert tunable.range_weight == 0.9 +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_quantization(tunable_type: str) -> None: + """ + Instantiate a numerical tunable with quantization. 
+ """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "quantization": 10, + "default": 0 + }} + """ + config = json.loads(json_config) + tunable = Tunable(name='test', config=config) + assert tunable.quantization == 10 + assert not tunable.is_log + + +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_log(tunable_type: str) -> None: + """ + Instantiate a numerical tunable with log scale. + """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "log": true, + "default": 0 + }} + """ + config = json.loads(json_config) + tunable = Tunable(name='test', config=config) + assert tunable.is_log + + @pytest.mark.parametrize("tunable_type", ["int", "float"]) def test_numerical_weights_no_specials(tunable_type: str) -> None: """ @@ -384,6 +421,24 @@ def test_numerical_weights_wrong_values(tunable_type: str) -> None: Tunable(name='test', config=config) +@pytest.mark.parametrize("tunable_type", ["int", "float"]) +def test_numerical_quantization_wrong(tunable_type: str) -> None: + """ + Instantiate a numerical tunable with invalid number of quantization points. + """ + json_config = f""" + {{ + "type": "{tunable_type}", + "range": [0, 100], + "quantization": 0, + "default": 0 + }} + """ + config = json.loads(json_config) + with pytest.raises(ValueError): + Tunable(name='test', config=config) + + def test_bad_type() -> None: """ Disallow bad types. 
From cb30b7dc4d8dc5f76674a3a5041d9d2b69e0f10a Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Fri, 2 Feb 2024 12:30:29 -0800 Subject: [PATCH 16/21] use special_weights instead of just weights for the numerical tunables --- .../schemas/tunables/tunable-params-schema.json | 10 +++++----- .../good/full/full-tunable-params-test.jsonc | 2 +- .../mlos_bench/tests/tunable_groups_fixtures.py | 2 +- .../tests/tunables/tunable_definition_test.py | 14 +++++++------- mlos_bench/mlos_bench/tunables/tunable.py | 5 ++++- 5 files changed, 18 insertions(+), 15 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 56289f4e73f..d7a1ad69248 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -45,7 +45,7 @@ }, "required": ["type", "default", "values"], "not": { - "required": ["range", "special"] + "required": ["range", "special", "special_weights", "range_weight"] }, "$comment": "TODO: add check that default is in values", "unevaluatedProperties": false @@ -80,7 +80,7 @@ }, "uniqueItems": true }, - "weights": { + "special_weights": { "type": "array", "items": { "type": "number" @@ -92,7 +92,7 @@ }, "required": ["type", "default", "range"], "not": { - "required": ["values"] + "required": ["values", "weights"] }, "$comment": "TODO: add check that default is in range", "unevaluatedProperties": false @@ -127,7 +127,7 @@ }, "uniqueItems": true }, - "weights": { + "special_weights": { "type": "array", "items": { "type": "number" @@ -139,7 +139,7 @@ }, "required": ["type", "default", "range"], "not": { - "required": ["values"] + "required": ["values", "weights"] }, "$comment": "TODO: add check that default is in range", "unevaluatedProperties": false diff --git 
a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc index 29b98fa76b2..4c271fce15e 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -10,7 +10,7 @@ "range": [1, 500], "meta": {"suffix": "MB"}, "special": [-1], - "weights": [0.1], + "special_weights": [0.1], "range_weight": 0.9 }, "float": { diff --git a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py index 6f2027373cf..dd6a65e041a 100644 --- a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py +++ b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py @@ -54,7 +54,7 @@ "special": [-1, 0], // FLAML requires uniform weights, separately for // specials and switching between specials and range. 
- "weights": [0.25, 0.25], + "special_weights": [0.25, 0.25], "range_weight": 0.5 }, "kernel_sched_latency_ns": { diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 2bd3a5d1eb7..3a3897a9e52 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -235,7 +235,7 @@ def test_numerical_weights(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [0], - "weights": [0.1], + "special_weights": [0.1], "range_weight": 0.9, "default": 0 }} @@ -250,13 +250,13 @@ def test_numerical_weights(tunable_type: str) -> None: @pytest.mark.parametrize("tunable_type", ["int", "float"]) def test_numerical_weights_no_specials(tunable_type: str) -> None: """ - Raise an error if weights are specified but no special values. + Raise an error if special_weights are specified but no special values. """ json_config = f""" {{ "type": "{tunable_type}", "range": [0, 100], - "weights": [0.1, 0.9], + "special_weights": [0.1, 0.9], "default": 0 }} """ @@ -276,7 +276,7 @@ def test_numerical_weights_non_normalized(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [-1, 0], - "weights": [0, 10], + "special_weights": [0, 10], "range_weight": 90, "default": 0 }} @@ -298,7 +298,7 @@ def test_numerical_weights_wrong_count(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [0], - "weights": [0.1, 0.1, 0.8], + "special_weights": [0.1, 0.1, 0.8], "range_weight": 0.1, "default": 0 }} @@ -318,7 +318,7 @@ def test_numerical_weights_no_range_weight(tunable_type: str) -> None: "type": "{tunable_type}", "range": [0, 100], "special": [0, -1], - "weights": [0.1, 0.2], + "special_weights": [0.1, 0.2], "default": 0 }} """ @@ -374,7 +374,7 @@ def test_numerical_weights_wrong_values(tunable_type: str) -> None: "type": "{tunable_type}", 
"range": [0, 100], "special": [0], - "weights": [-1], + "special_weights": [-1], "range_weight": 10, "default": 0 }} diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index f01507cbaee..ef4e8df51d6 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -34,6 +34,7 @@ class TunableDict(TypedDict, total=False): range: Optional[Union[Sequence[int], Sequence[float]]] special: Optional[Union[List[int], List[float]]] weights: Optional[List[float]] + special_weights: Optional[List[float]] range_weight: Optional[float] meta: Dict[str, Any] @@ -81,7 +82,9 @@ def __init__(self, name: str, config: TunableDict): config_range = (config_range[0], config_range[1]) self._range = config_range self._special: Union[List[int], List[float]] = config.get("special") or [] - self._weights: List[float] = config.get("weights") or [] + self._weights: List[float] = ( + config.get("weights") or config.get("special_weights") or [] + ) self._range_weight: Optional[float] = config.get("range_weight") self._current_value = None self._sanity_check() From f219178958a272f9940dfac22908f8367961efd0 Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Fri, 2 Feb 2024 15:32:11 -0800 Subject: [PATCH 17/21] use values_weights instead of just weights for categoricals --- .../config/schemas/tunables/tunable-params-schema.json | 6 +++--- .../test-cases/good/full/full-tunable-params-test.jsonc | 2 +- mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py | 2 +- .../mlos_bench/tests/tunables/tunable_definition_test.py | 6 +++--- mlos_bench/mlos_bench/tunables/tunable.py | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index d7a1ad69248..e00118fe762 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ 
b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -36,7 +36,7 @@ "minItems": 1, "uniqueItems": true }, - "weights": { + "values_weights": { "type": "array", "items": { "type": "number" @@ -92,7 +92,7 @@ }, "required": ["type", "default", "range"], "not": { - "required": ["values", "weights"] + "required": ["values", "values_weights"] }, "$comment": "TODO: add check that default is in range", "unevaluatedProperties": false @@ -139,7 +139,7 @@ }, "required": ["type", "default", "range"], "not": { - "required": ["values", "weights"] + "required": ["values", "values_weights"] }, "$comment": "TODO: add check that default is in range", "unevaluatedProperties": false diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc index 4c271fce15e..733385bb969 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/good/full/full-tunable-params-test.jsonc @@ -77,7 +77,7 @@ "default": "yes", "meta": {"quote": true}, "values": ["yes", "no"], - "weights": [50, 50] + "values_weights": [50, 50] } } } diff --git a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py index dd6a65e041a..920005b7258 100644 --- a/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py +++ b/mlos_bench/mlos_bench/tests/tunable_groups_fixtures.py @@ -39,7 +39,7 @@ "type": "categorical", "default": "halt", "values": ["halt", "mwait", "noidle"], - "weights": [33, 33, 33] // FLAML requires uniform weights + "values_weights": [33, 33, 33] // FLAML requires uniform weights } } }, diff --git a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py 
b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py index 3a3897a9e52..d4817d3cfbe 100644 --- a/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py +++ b/mlos_bench/mlos_bench/tests/tunables/tunable_definition_test.py @@ -45,7 +45,7 @@ def test_categorical_weights() -> None: { "type": "categorical", "values": ["foo", "bar", "baz"], - "weights": [25, 25, 50], + "values_weights": [25, 25, 50], "default": "foo" } """ @@ -62,7 +62,7 @@ def test_categorical_weights_wrong_count() -> None: { "type": "categorical", "values": ["foo", "bar", "baz"], - "weights": [50, 50], + "values_weights": [50, 50], "default": "foo" } """ @@ -79,7 +79,7 @@ def test_categorical_weights_wrong_values() -> None: { "type": "categorical", "values": ["foo", "bar", "baz"], - "weights": [-1, 50, 50], + "values_weights": [-1, 50, 50], "default": "foo" } """ diff --git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index ef4e8df51d6..10192a70c46 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -33,7 +33,7 @@ class TunableDict(TypedDict, total=False): values: Optional[List[Optional[str]]] range: Optional[Union[Sequence[int], Sequence[float]]] special: Optional[Union[List[int], List[float]]] - weights: Optional[List[float]] + values_weights: Optional[List[float]] special_weights: Optional[List[float]] range_weight: Optional[float] meta: Dict[str, Any] @@ -83,7 +83,7 @@ def __init__(self, name: str, config: TunableDict): self._range = config_range self._special: Union[List[int], List[float]] = config.get("special") or [] self._weights: List[float] = ( - config.get("weights") or config.get("special_weights") or [] + config.get("values_weights") or config.get("special_weights") or [] ) self._range_weight: Optional[float] = config.get("range_weight") self._current_value = None From 000ebb5c240f4f47511cebfc8458967402f7601a Mon Sep 17 00:00:00 2001 From: Sergiy Matusevych Date: Tue, 6 
Feb 2024 09:12:17 -0800 Subject: [PATCH 18/21] Update mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json Co-authored-by: Brian Kroth --- .../config/schemas/tunables/tunable-params-schema.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 79a82c482f5..ea42927b017 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -74,7 +74,8 @@ "quantization": { - "type": "number" + "type": "number", + "minimum": 1 }, "log": { "type": "boolean" From ebac572a651b810b27db813110d55202dfc7d7be Mon Sep 17 00:00:00 2001 From: Brian Kroth Date: Tue, 6 Feb 2024 17:30:32 +0000 Subject: [PATCH 19/21] Restructure numeric tunable params schema for more docs and reuse --- .../tunables/tunable-params-schema.json | 93 ++++++++++++------- .../tunable-params-int-bad-quantization.jsonc | 2 +- ...e-params-int-wrong-quantization-type.jsonc | 13 +++ 3 files changed, 72 insertions(+), 36 deletions(-) create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-wrong-quantization-type.jsonc diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 79a82c482f5..0817cd6812b 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -5,14 +5,56 @@ "$defs": { "tunable_param_meta": {
Can be used by scripts for additional info when generating configs from the suggested values.", "type": "object", "additionalProperties": { + "$comment": "Only flat dictionaries are allowed.", "type": ["array", "string", "boolean", "null", "number"], "items": { "type": ["string", "boolean", "null", "number"] } } }, + "numeric_range": { + "description": "Two element array representing the lower and upper bounds of the range.", + "type": "array", + "$comment": "items type left unspecified here", + "minItems": 2, + "maxItems": 2 + }, + "quantization": { + "description": "The number of buckets to quantize the range into.\nSee Also:\nhttps://automl.github.io/ConfigSpace/main/api/hyperparameters.html#module-ConfigSpace.api.types.float,\nhttps://automl.github.io/ConfigSpace/main/api/hyperparameters.html#module-ConfigSpace.api.types.integer", + "$comment": "type left unspecified here", + "minimum": 1 + }, + "log_scale": { + "description": "Whether to use log instead of linear scale for the range search.", + "type": "boolean" + }, + "special_values": { + "description": "An array of values that may have special meaning for the target system and could be outside the usual search range.", + "type": "array", + "items": { + "description": "Some special values may have a different type than the numeric parameter (e.g., keyword \"AUTO\").", + "type": ["number", "string", "boolean", "null"] + }, + "minItems": 1, + "uniqueItems": true + }, + "weights": { + "description": "An array of weights to be associated with the values in order to influence their search priorities.", + "type": "array", + "items": { + "type": "number", + "minimum": 0 + }, + "minItems": 1 + }, + "range_weight": { + "description": "The weight to be associated with the range in order to influence its search priority relative to specials values.", + "type": "number", + "minimum": 0 + }, "tunable_param_categorical": { "type": "object", "properties": { @@ -37,10 +79,7 @@ "uniqueItems": true }, "values_weights": { - 
"type": "array", - "items": { - "type": "number" - } + "$ref": "#/$defs/weights" } }, "required": ["type", "default", "values"], @@ -66,34 +105,26 @@ "type": "integer" }, "range": { - "type": "array", + "$ref": "#/$defs/numeric_range", "items": { "type": "integer" - }, - "minItems": 2, - "maxItems": 2 + } }, "quantization": { - "type": "number" + "$ref": "#/$defs/quantization", + "type": "integer" }, "log": { - "type": "boolean" + "$ref": "#/$defs/log_scale" }, "special": { - "type": "array", - "items": { - "type": "integer" - }, - "uniqueItems": true + "$ref": "#/$defs/special_values" }, "special_weights": { - "type": "array", - "items": { - "type": "number" - } + "$ref": "#/$defs/weights" }, "range_weight": { - "type": "number" + "$ref": "#/$defs/range_weight" } }, "required": ["type", "default", "range"], @@ -119,34 +150,26 @@ "type": "number" }, "range": { - "type": "array", + "$ref": "#/$defs/numeric_range", "items": { "type": "number" - }, - "minItems": 2, - "maxItems": 2 + } }, "quantization": { + "$ref": "#/$defs/quantization", "type": "number" }, "log": { - "type": "boolean" + "$ref": "#/$defs/log_scale" }, "special": { - "type": "array", - "items": { - "type": "number" - }, - "uniqueItems": true + "$ref": "#/$defs/special_values" }, "special_weights": { - "type": "array", - "items": { - "type": "number" - } + "$ref": "#/$defs/weights" }, "range_weight": { - "type": "number" + "$ref": "#/$defs/range_weight" } }, "required": ["type", "default", "range"], diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc index 1b7af4ffcd0..a32208efd7b 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc +++ 
b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc @@ -6,7 +6,7 @@ "type": "int", "default": 10, "range": [1, 500], - "quantization": "yes" // <-- this is invalid + "quantization": 0 } } } diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-wrong-quantization-type.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-wrong-quantization-type.jsonc new file mode 100644 index 00000000000..1b7af4ffcd0 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-wrong-quantization-type.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "int": { + "type": "int", + "default": 10, + "range": [1, 500], + "quantization": "yes" // <-- this is invalid + } + } + } +} From 26b9862013f274a4280d0fa253d7fe72454c3e1a Mon Sep 17 00:00:00 2001 From: Brian Kroth Date: Tue, 6 Feb 2024 17:32:25 +0000 Subject: [PATCH 20/21] more descriptions --- .../config/schemas/tunables/tunable-params-schema.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 0817cd6812b..44bc3138156 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -65,12 +65,14 @@ "$ref": "#/$defs/tunable_param_meta" }, "type": { + "description": "A categorical type tunable.", "const": "categorical" }, "default": { "type": ["string", "number", "boolean"] }, "values": { + "description": "List of values for this categorical type tunable", "type": "array", "items": { "type": ["string", "number", "boolean"] @@ -99,6 +101,7 @@ "$ref": "#/$defs/tunable_param_meta" }, "type": { + 
"description": "An integer type tunable.", "const": "int" }, "default": { @@ -144,6 +147,7 @@ "$ref": "#/$defs/tunable_param_meta" }, "type": { + "description": "A continuous numerical type tunable.", "const": "float" }, "default": { From 88d98c45521c64ee880ae1b790049540bd73e112 Mon Sep 17 00:00:00 2001 From: Brian Kroth Date: Tue, 6 Feb 2024 20:40:17 +0000 Subject: [PATCH 21/21] add float vs int handling of quantization --- .../tunables/tunable-params-schema.json | 9 ++++---- ...le-params-int-bad-float-quantization.jsonc | 13 +++++++++++ ...ble-params-int-bad-int-quantization.jsonc} | 2 +- mlos_bench/mlos_bench/tunables/tunable.py | 23 ++++++++++++++----- 4 files changed, 36 insertions(+), 11 deletions(-) create mode 100644 mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-float-quantization.jsonc rename mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/{tunable-params-int-bad-quantization.jsonc => tunable-params-int-bad-int-quantization.jsonc} (76%) diff --git a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json index 44bc3138156..daf0a6e06ce 100644 --- a/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json +++ b/mlos_bench/mlos_bench/config/schemas/tunables/tunable-params-schema.json @@ -24,8 +24,7 @@ }, "quantization": { "description": "The number of buckets to quantize the range into.\nSee Also:\nhttps://automl.github.io/ConfigSpace/main/api/hyperparameters.html#module-ConfigSpace.api.types.float,\nhttps://automl.github.io/ConfigSpace/main/api/hyperparameters.html#module-ConfigSpace.api.types.integer", - "$comment": "type left unspecified here", - "minimum": 1 + "$comment": "type left unspecified here" }, "log_scale": { "description": "Whether to use log instead of linear scale for the range search.", @@ -115,7 +114,8 @@ }, "quantization": { "$ref": 
"#/$defs/quantization", - "type": "integer" + "type": "integer", + "exclusiveMinimum": 1 }, "log": { "$ref": "#/$defs/log_scale" @@ -161,7 +161,8 @@ }, "quantization": { "$ref": "#/$defs/quantization", - "type": "number" + "type": "number", + "exclusiveMinimum": 0 }, "log": { "$ref": "#/$defs/log_scale" diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-float-quantization.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-float-quantization.jsonc new file mode 100644 index 00000000000..194682b8592 --- /dev/null +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-float-quantization.jsonc @@ -0,0 +1,13 @@ +{ + "covariant_group_name-1": { + "cost": 1, + "params": { + "float": { + "type": "float", + "default": 10, + "range": [1, 500], + "quantization": 0 // <-- should be greater than 0 + } + } + } +} diff --git a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-int-quantization.jsonc similarity index 76% rename from mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc rename to mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-int-quantization.jsonc index a32208efd7b..199cf681ca3 100644 --- a/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-quantization.jsonc +++ b/mlos_bench/mlos_bench/tests/config/schemas/tunable-params/test-cases/bad/invalid/tunable-params-int-bad-int-quantization.jsonc @@ -6,7 +6,7 @@ "type": "int", "default": 10, "range": [1, 500], - "quantization": 0 + "quantization": 1 // <-- should be greater than 1 } } } diff 
--git a/mlos_bench/mlos_bench/tunables/tunable.py b/mlos_bench/mlos_bench/tunables/tunable.py index de98e8f5505..9df34b4e091 100644 --- a/mlos_bench/mlos_bench/tunables/tunable.py +++ b/mlos_bench/mlos_bench/tunables/tunable.py @@ -32,7 +32,7 @@ class TunableDict(TypedDict, total=False): default: TunableValue values: Optional[List[Optional[str]]] range: Optional[Union[Sequence[int], Sequence[float]]] - quantization: Optional[int] + quantization: Optional[Union[int, float]] log: Optional[bool] special: Optional[Union[List[int], List[float]]] values_weights: Optional[List[float]] @@ -78,7 +78,7 @@ def __init__(self, name: str, config: TunableDict): self._values = [str(v) if v is not None else v for v in self._values] self._meta: Dict[str, Any] = config.get("meta", {}) self._range: Optional[Union[Tuple[int, int], Tuple[float, float]]] = None - self._quantization: Optional[int] = config.get("quantization") + self._quantization: Optional[Union[int, float]] = config.get("quantization") self._log: Optional[bool] = config.get("log") config_range = config.get("range") if config_range is not None: @@ -95,6 +95,7 @@ def __init__(self, name: str, config: TunableDict): self.value = self._default def _sanity_check(self) -> None: + # pylint: disable=too-complex,too-many-branches """ Check if the status of the Tunable is valid, and throw ValueError if it is not. 
""" @@ -123,8 +124,17 @@ def _sanity_check(self) -> None: raise ValueError(f"Values must be None for the numerical type tunable {self}") if not self._range or len(self._range) != 2 or self._range[0] >= self._range[1]: raise ValueError(f"Invalid range for tunable {self}: {self._range}") - if self._quantization is not None and self._quantization <= 1: - raise ValueError(f"Number of quantization points is <= 1: {self}") + if self._quantization is not None: + if self.dtype == int: + if not isinstance(self._quantization, int): + raise ValueError(f"Quantization of a int param should be an int: {self}") + if self._quantization <= 1: + raise ValueError(f"Number of quantization points is <= 1: {self}") + if self.dtype == float: + if not isinstance(self._quantization, (float, int)): + raise ValueError(f"Quantization of a float param should be a float or int: {self}") + if self._quantization <= 0: + raise ValueError(f"Number of quantization points is <= 0: {self}") if self._weights: if self._range_weight is None: raise ValueError(f"Must specify weight for the range: {self}") @@ -148,6 +158,7 @@ def __repr__(self) -> str: string : str A human-readable version of the Tunable. """ + # TODO? Add weights, specials, quantization, distribution? if self.is_categorical: return f"{self._name}[{self._type}]({self._values}:{self._default})={self._current_value}" return f"{self._name}[{self._type}]({self._range}:{self._default})={self._current_value}" @@ -497,13 +508,13 @@ def range(self) -> Union[Tuple[int, int], Tuple[float, float]]: return self._range @property - def quantization(self) -> Optional[int]: + def quantization(self) -> Optional[Union[int, float]]: """ Get the number of quantization points, if specified. Returns ------- - quantization : int + quantization : int, float, None Number of quantization points or None. """ assert self.is_numerical