Add basic support for repeating a trial config. (#642)
Adds very rudimentary support for repeating configs across multiple
trials. We expect to expand this into more advanced support once we
add a proper scheduler (#463).

Additional tests forthcoming with #633 and related PRs.
bpkroth authored Jan 19, 2024
1 parent b531400 commit e2a0e10
Showing 5 changed files with 39 additions and 14 deletions.
7 changes: 7 additions & 0 deletions mlos_bench/mlos_bench/config/schemas/cli/cli-schema.json
@@ -67,6 +67,13 @@
"$ref": "#/$defs/json_config_path"
},

"trial_config_repeat_count": {
"description": "Number of times to repeat a config.",
"type": "integer",
"minimum": 1,
"examples": [3, 5]
},

"storage": {
"description": "Path to the json config describing the storage backend to use.",
"$ref": "#/$defs/json_config_path"
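As a rough illustration of the new schema property (a minimal sketch using the jsonschema package against a hand-copied fragment of the property above, not the shipped cli-schema.json), the constraint is simply an integer greater than or equal to 1:

    import jsonschema

    # Hand-copied fragment mirroring the "trial_config_repeat_count" property above.
    repeat_count_schema = {
        "type": "object",
        "properties": {
            "trial_config_repeat_count": {
                "description": "Number of times to repeat a config.",
                "type": "integer",
                "minimum": 1,
            },
        },
    }

    # Accepted: repeat each suggested config 3 times.
    jsonschema.validate(instance={"trial_config_repeat_count": 3}, schema=repeat_count_schema)

    # Rejected: 0 violates the "minimum": 1 constraint.
    try:
        jsonschema.validate(instance={"trial_config_repeat_count": 0}, schema=repeat_count_schema)
    except jsonschema.ValidationError as err:
        print("Invalid CLI config:", err.message)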
6 changes: 6 additions & 0 deletions mlos_bench/mlos_bench/launcher.py
@@ -75,6 +75,8 @@ def __init__(self, description: str, long_text: str = "", argv: Optional[List[st
else:
config = {}

self.trial_config_repeat_count = args.trial_config_repeat_count or config.get("trial_config_repeat_count", 1)

log_level = args.log_level or config.get("log_level", _LOG_LEVEL)
try:
log_level = int(log_level)
@@ -195,6 +197,10 @@ def _parse_args(parser: argparse.ArgumentParser, argv: Optional[List[str]]) -> T
help='Path to the optimizer configuration file. If omitted, run' +
' a single trial with default (or specified in --tunable_values).')

parser.add_argument(
'--trial_config_repeat_count', '--trial-config-repeat-count', required=False, type=int, default=1,
help='Number of times to repeat each config. Default is 1 trial per config, though more may be advised.')

parser.add_argument(
'--storage', required=False,
help='Path to the storage configuration file.' +
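The precedence of the new setting in launcher.py can be illustrated with a standalone sketch (a hypothetical helper, not the Launcher class itself; note this sketch uses default=None so that the config-file fallback can actually apply): the command-line flag wins, then the CLI config file, then the default of 1.

    import argparse
    from typing import Any, Dict, List, Optional

    def resolve_repeat_count(argv: Optional[List[str]], config: Dict[str, Any]) -> int:
        parser = argparse.ArgumentParser()
        # default=None (an assumption of this sketch) so an omitted flag falls through to the config file.
        parser.add_argument('--trial_config_repeat_count', '--trial-config-repeat-count',
                            required=False, type=int, default=None)
        args, _ = parser.parse_known_args(argv)
        return args.trial_config_repeat_count or config.get("trial_config_repeat_count", 1)

    print(resolve_repeat_count([], {}))                                # 1 (default)
    print(resolve_repeat_count([], {"trial_config_repeat_count": 5}))  # 5 (from the CLI config file)
    print(resolve_repeat_count(["--trial_config_repeat_count", "3"],
                               {"trial_config_repeat_count": 5}))      # 3 (CLI flag wins)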
34 changes: 22 additions & 12 deletions mlos_bench/mlos_bench/run.py
@@ -36,7 +36,8 @@ def _main() -> None:
storage=launcher.storage,
root_env_config=launcher.root_env_config,
global_config=launcher.global_config,
do_teardown=launcher.teardown
do_teardown=launcher.teardown,
trial_config_repeat_count=launcher.trial_config_repeat_count,
)

_LOG.info("Final result: %s", result)
@@ -48,7 +49,9 @@ def _optimize(*,
storage: Storage,
root_env_config: str,
global_config: Dict[str, Any],
do_teardown: bool) -> Tuple[Optional[float], Optional[TunableGroups]]:
do_teardown: bool,
trial_config_repeat_count: int = 1,
) -> Tuple[Optional[float], Optional[TunableGroups]]:
"""
Main optimization loop.
@@ -66,8 +69,13 @@
Global configuration parameters.
do_teardown : bool
If True, teardown the environment at the end of the experiment
trial_config_repeat_count : int
How many trials to repeat for the same configuration.
"""
# pylint: disable=too-many-locals
if trial_config_repeat_count <= 0:
raise ValueError(f"Invalid trial_config_repeat_count: {trial_config_repeat_count}")

if _LOG.isEnabledFor(logging.INFO):
_LOG.info("Root Environment:\n%s", env.pprint())

@@ -118,16 +126,18 @@
config_id, json.dumps(tunable_values, indent=2))
config_id = -1

trial = exp.new_trial(tunables, config={
# Add some additional metadata to track for the trial such as the
# optimizer config used.
# TODO: Improve for supporting multi-objective
# (e.g., opt_target_1, opt_target_2, ... and opt_direction_1, opt_direction_2, ...)
"optimizer": opt.name,
"opt_target": opt.target,
"opt_direction": opt.direction,
})
_run(env_context, opt_context, trial, global_config)
for repeat_i in range(1, trial_config_repeat_count + 1):
trial = exp.new_trial(tunables, config={
# Add some additional metadata to track for the trial such as the
# optimizer config used.
# TODO: Improve for supporting multi-objective
# (e.g., opt_target_1, opt_target_2, ... and opt_direction_1, opt_direction_2, ...)
"optimizer": opt.name,
"opt_target": opt.target,
"opt_direction": opt.direction,
"repeat_i": repeat_i,
})
_run(env_context, opt_context, trial, global_config)

if do_teardown:
env_context.teardown()
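The heart of the change in run.py is the loop above: each configuration suggested by the optimizer is turned into trial_config_repeat_count trials, each tagged with a 1-based repeat_i. A minimal standalone sketch of that pattern, with made-up tunable values and run_trial as a hypothetical stand-in for exp.new_trial() plus _run():

    from typing import Any, Callable, Dict

    def repeat_config(tunable_values: Dict[str, Any],
                      run_trial: Callable[[Dict[str, Any], Dict[str, Any]], None],
                      trial_config_repeat_count: int = 1) -> None:
        if trial_config_repeat_count <= 0:
            raise ValueError(f"Invalid trial_config_repeat_count: {trial_config_repeat_count}")
        for repeat_i in range(1, trial_config_repeat_count + 1):
            # Extra metadata recorded with each trial, mirroring the "repeat_i" key above.
            run_trial(tunable_values, {"repeat_i": repeat_i})

    # Usage: schedule the same config three times.
    repeat_config({"vm_size": "Standard_B2s"},
                  lambda cfg, meta: print(cfg, meta),
                  trial_config_repeat_count=3)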
@@ -15,6 +15,8 @@
"optimizer": "optimizers/one_shot_opt.jsonc",
"storage": "storage/sqlite.jsonc",

"trial_config_repeat_count": 3,

"random_init": true,
"random_seed": 42,

4 changes: 2 additions & 2 deletions mlos_bench/mlos_bench/tests/launcher_run_test.py
@@ -93,7 +93,7 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic
"""
_launch_main_app(
root_path, local_exec_service,
"--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --max_iterations 3",
"--config mlos_bench/mlos_bench/tests/config/cli/mock-opt.jsonc --trial_config_repeat_count 3 --max_iterations 3",
[
# Iteration 1: Expect first value to be the baseline
f"^{_RE_DATE} mlos_core_optimizer\\.py:\\d+ " +
@@ -106,6 +106,6 @@ def test_launch_main_app_opt(root_path: str, local_exec_service: LocalExecServic
r"register DEBUG Score: \d+\.\d+ Dataframe:\s*$",
# Final result: baseline is the optimum for the mock environment
f"^{_RE_DATE} run\\.py:\\d+ " +
r"_optimize INFO Env: Mock environment best score: 64\.88\d+\s*$",
r"_optimize INFO Env: Mock environment best score: 64\.53\d+\s*$",
]
)
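For reference, a minimal sketch of the regex-based log matching these expectations rely on (the timestamp pattern and log line below are made up for illustration; the real _RE_DATE and output format live in the test helpers):

    import re

    _RE_DATE = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}"  # assumed timestamp format
    pattern = (f"^{_RE_DATE} run\\.py:\\d+ " +
               r"_optimize INFO Env: Mock environment best score: 64\.53\d+\s*$")
    log_line = "2024-01-19 12:34:56,789 run.py:123 _optimize INFO Env: Mock environment best score: 64.5312"
    assert re.search(pattern, log_line, re.MULTILINE) is not None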
