From 9af78caa0fb8371523ebfabc2a33af5f511a80e1 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Mon, 21 Oct 2024 06:30:16 +0000 Subject: [PATCH 1/9] Save output model to output_dir --- olive/engine/engine.py | 24 ++++++---- olive/engine/footprint.py | 36 +++++++++++++- olive/engine/packaging/packaging_generator.py | 34 ++------------ test/unit_test/engine/test_engine.py | 47 +++++++++++++++++-- 4 files changed, 96 insertions(+), 45 deletions(-) diff --git a/olive/engine/engine.py b/olive/engine/engine.py index f7d865461..c0d2921ab 100644 --- a/olive/engine/engine.py +++ b/olive/engine/engine.py @@ -15,7 +15,7 @@ from olive.common.config_utils import validate_config from olive.common.constants import DEFAULT_WORKFLOW_ID, LOCAL_INPUT_MODEL_ID from olive.engine.config import FAILED_CONFIG, INVALID_CONFIG, PRUNED_CONFIGS -from olive.engine.footprint import Footprint, FootprintNodeMetric +from olive.engine.footprint import Footprint, FootprintNode, FootprintNodeMetric, get_best_candidate_node from olive.engine.packaging.packaging_generator import generate_output_artifacts from olive.evaluator.metric import Metric from olive.evaluator.metric_result import MetricResult, joint_metric_key @@ -229,12 +229,12 @@ def run( output_dir/output_footprints.json: footprint of the output models A. One pass flow: - output_dir/output_model/metrics.json: evaluation results of the output model - output_dir/output_model/model_config.json: output model configuration - output_dir/output_model/...: output model files + output_dir/metrics.json: evaluation results of the output model + output_dir/model_config.json: output model configuration + output_dir/...: output model files B. Multiple pass flows: - output_dir/output_model/{pass_flow}/...: Same as A but for each pass flow + output_dir/{pass_flow}/...: Same as A but for each pass flow 2. Multiple accelerator specs output_dir/{acclerator_spec}/...: Same as 1 but for each accelerator spec @@ -288,6 +288,14 @@ def run( else: logger.debug("No packaging config provided, skip packaging artifacts") + # TODO(team): refactor output structure + # Do not change condition order. 
For no search, values of outputs are MetricResult
+        # Consolidate the output structure for search and no search mode
+        if outputs and self.passes and not next(iter(outputs.values())).check_empty_nodes():
+            best_node: FootprintNode = get_best_candidate_node(outputs, self.footprints)
+            self.cache.save_model(model_id=best_node.model_id, output_dir=output_dir, overwrite=True)
+            logger.info("Saved output model to %s", output_dir)
+
         return outputs
 
     def run_accelerator(
@@ -396,8 +404,7 @@ def run_no_search(
                 pass_name = pass_item["name"]
                 raise ValueError(f"Pass {pass_name} has search space but search strategy is None")
 
-        # output models will be saved in output_dir/output_model
-        output_model_dir = Path(output_dir) / "output_model"
+        output_model_dir = Path(output_dir)
 
         output_model_ids = []
        for pass_flow in self.pass_flows:
@@ -431,9 +438,6 @@ def run_no_search(
                     json.dump(signal.to_json(), f, indent=4)
                 logger.info("Saved evaluation results of output model to %s", results_path)
 
-            self.cache.save_model(model_id=model_ids[-1], output_dir=flow_output_dir, overwrite=True)
-            logger.info("Saved output model to %s", flow_output_dir)
-
             output_model_ids.append(model_ids[-1])
 
         output_footprints = self.footprints[accelerator_spec].create_footprints_by_model_ids(output_model_ids)
diff --git a/olive/engine/footprint.py b/olive/engine/footprint.py
index 46c577531..258170cfc 100644
--- a/olive/engine/footprint.py
+++ b/olive/engine/footprint.py
@@ -6,11 +6,15 @@
 import logging
 from collections import OrderedDict, defaultdict
 from copy import deepcopy
-from typing import DefaultDict, Dict, List, NamedTuple, Optional
+from typing import TYPE_CHECKING, DefaultDict, Dict, List, NamedTuple, Optional
 
 from olive.common.config_utils import ConfigBase, config_json_dumps, config_json_loads
 from olive.evaluator.metric_result import MetricResult
 
+if TYPE_CHECKING:
+    from olive.hardware import AcceleratorSpec
+
+
 logger = logging.getLogger(__name__)
 
 
@@ -159,6 +163,9 @@ def trace_back_run_history(self, model_id) -> Dict[str, Dict]:
             model_id = self.nodes[model_id].parent_model_id
         return rls
 
+    def check_empty_nodes(self):
+        return self.nodes is None or len(self.nodes) == 0
+
     def to_df(self):
         # to pandas.DataFrame
         raise NotImplementedError
@@ -422,3 +429,30 @@ def _plot_pareto_frontier(self, ranks=None, save_path=None, is_show=True, save_f
 
         if is_show:
             fig.show()
+
+
+def get_best_candidate_node(
+    pf_footprints: Dict["AcceleratorSpec", Footprint], footprints: Dict["AcceleratorSpec", Footprint]
+):
+    objective_dict = next(iter(pf_footprints.values())).objective_dict
+    top_nodes = []
+    for accelerator_spec, pf_footprint in pf_footprints.items():
+        footprint = footprints[accelerator_spec]
+        if pf_footprint.nodes and footprint.nodes:
+            top_nodes.append(next(iter(pf_footprint.get_top_ranked_nodes(1))))
+    return next(
+        iter(
+            sorted(
+                top_nodes,
+                key=lambda x: tuple(
+                    (
+                        x.metrics.value[metric].value
+                        if x.metrics.cmp_direction[metric] == 1
+                        else -x.metrics.value[metric].value
+                    )
+                    for metric in objective_dict
+                ),
+                reverse=True,
+            )
+        )
+    )
diff --git a/olive/engine/packaging/packaging_generator.py b/olive/engine/packaging/packaging_generator.py
index b1c95e084..beaf046d5 100644
--- a/olive/engine/packaging/packaging_generator.py
+++ b/olive/engine/packaging/packaging_generator.py
@@ -18,6 +18,7 @@
 from olive.common.constants import OS
 from olive.common.utils import retry_func, run_subprocess
+from olive.engine.footprint import get_best_candidate_node
 from olive.engine.packaging.packaging_config import (
AzureMLDeploymentPackagingConfig, DockerfilePackagingConfig, @@ -68,7 +69,7 @@ def _package_dockerfile( config: DockerfilePackagingConfig = packaging_config.config logger.info("Packaging output models to Dockerfile") base_image = config.base_image - best_node = _get_best_candidate_node(pf_footprints, footprints) + best_node = get_best_candidate_node(pf_footprints, footprints) docker_context_path = "docker_content" content_path = output_dir / docker_context_path @@ -133,7 +134,7 @@ def _package_azureml_deployment( try: # Get best model from footprint - best_node = _get_best_candidate_node(pf_footprints, footprints) + best_node = get_best_candidate_node(pf_footprints, footprints) with tempfile.TemporaryDirectory() as temp_dir: tempdir = Path(temp_dir) @@ -303,33 +304,6 @@ def _package_azureml_deployment( raise -def _get_best_candidate_node( - pf_footprints: Dict["AcceleratorSpec", "Footprint"], footprints: Dict["AcceleratorSpec", "Footprint"] -): - objective_dict = next(iter(pf_footprints.values())).objective_dict - top_nodes = [] - for accelerator_spec, pf_footprint in pf_footprints.items(): - footprint = footprints[accelerator_spec] - if pf_footprint.nodes and footprint.nodes: - top_nodes.append(next(iter(pf_footprint.get_top_ranked_nodes(1)))) - return next( - iter( - sorted( - top_nodes, - key=lambda x: tuple( - ( - x.metrics.value[metric].value - if x.metrics.cmp_direction[metric] == 1 - else -x.metrics.value[metric].value - ) - for metric in objective_dict - ), - reverse=True, - ) - ) - ) - - def _is_generative_model(config: Dict[str, Any]) -> bool: model_attributes = config.get("model_attributes") or {} return model_attributes.get("is_generative", False) @@ -353,7 +327,7 @@ def _package_candidate_models( tempdir = Path(temp_dir) if packaging_type == PackagingType.Zipfile: - best_node: FootprintNode = _get_best_candidate_node(pf_footprints, footprints) + best_node: FootprintNode = get_best_candidate_node(pf_footprints, footprints) is_generative = _is_generative_model(best_node.model_config["config"]) if packaging_config.include_runtime_packages: diff --git a/test/unit_test/engine/test_engine.py b/test/unit_test/engine/test_engine.py index 1cc3a817a..19863e92b 100644 --- a/test/unit_test/engine/test_engine.py +++ b/test/unit_test/engine/test_engine.py @@ -307,7 +307,7 @@ def test_run_no_search(self, mock_local_system_init, tmp_path): # output model to output_dir output_dir = tmp_path / "output_dir" expected_metrics = MetricResult.parse_obj(metric_result_dict) - expected_saved_model_config = get_onnx_model_config(model_path=output_dir / "output_model" / "model.onnx") + expected_saved_model_config = get_onnx_model_config(model_path=output_dir / "model.onnx") # execute footprint = engine.run( @@ -321,17 +321,56 @@ def test_run_no_search(self, mock_local_system_init, tmp_path): assert output_node.model_config == onnx_model_config assert expected_metrics == output_node.metrics.value - output_model_dir = output_dir / "output_model" - model_json_path = output_model_dir / "model_config.json" + model_json_path = output_dir / "model_config.json" assert model_json_path.is_file() with model_json_path.open() as f: assert json.load(f) == expected_saved_model_config.to_json() - result_json_path = output_model_dir / "metrics.json" + result_json_path = output_dir / "metrics.json" assert result_json_path.is_file() with result_json_path.open() as f: assert json.load(f) == expected_metrics.__root__ + @pytest.mark.parametrize( + "search_strategy", + [ + { + "execution_order": "joint", + "search_algorithm": 
"random", + }, + None, + ], + ) + def test_run_output_model(self, search_strategy, tmp_path): + # setup + model_config = get_pytorch_model_config() + metric = get_accuracy_metric(AccuracySubType.ACCURACY_SCORE) + evaluator_config = OliveEvaluatorConfig(metrics=[metric]) + options = { + "cache_config": { + "cache_dir": tmp_path, + "clean_cache": True, + "clean_evaluation_cache": True, + }, + "search_strategy": search_strategy, + "evaluator": evaluator_config, + } + engine = Engine(**options) + _, p_config = get_onnxconversion_pass(ignore_pass_config=False, target_opset=13) + engine.register(OnnxConversion, config=p_config, disable_search=True) + # output model to output_dir + output_dir = tmp_path / "output_dir" + + # execute + engine.run( + model_config, + [DEFAULT_CPU_ACCELERATOR], + output_dir=output_dir, + ) + + # assert + assert Path(output_dir / "model.onnx").is_file() + def test_pass_exception(self, caplog, tmpdir): # Need explicitly set the propagate to allow the message to be logged into caplog # setup From 7a25bdc4f958da5274a55b865cce30fdd81399a0 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Mon, 21 Oct 2024 07:15:15 +0000 Subject: [PATCH 2/9] fix whisper example test --- examples/whisper/test_transcription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/whisper/test_transcription.py b/examples/whisper/test_transcription.py index fb910543e..e23f00f12 100644 --- a/examples/whisper/test_transcription.py +++ b/examples/whisper/test_transcription.py @@ -79,7 +79,7 @@ def main(raw_args=None): ep = config["systems"]["local_system"]["accelerators"][0]["execution_providers"][0] # load output model json - output_model_json_path = Path(config["output_dir"]) / "output_model" / "model_config.json" + output_model_json_path = Path(config["output_dir"]) / "model_config.json" with output_model_json_path.open() as f: output_model_json = json.load(f) From 25a255ac0eb87fca6cf9eaf3569f42fb85d48f46 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Tue, 22 Oct 2024 22:16:27 +0000 Subject: [PATCH 3/9] Fix tests --- olive/cache.py | 2 +- olive/cli/base.py | 2 +- olive/engine/footprint.py | 16 ++++++++++++++++ test/unit_test/cli/test_cli.py | 2 +- 4 files changed, 19 insertions(+), 3 deletions(-) diff --git a/olive/cache.py b/olive/cache.py index 36ce71198..c3473b9c1 100644 --- a/olive/cache.py +++ b/olive/cache.py @@ -423,7 +423,7 @@ def save_model( additional_files = model_attributes.get("additional_files", []) for i, src_filepath in enumerate(additional_files): - dst_filepath = output_dir / "model" / Path(src_filepath).name + dst_filepath = output_dir / Path(src_filepath).name additional_files[i] = str(dst_filepath) if not dst_filepath.exists(): diff --git a/olive/cli/base.py b/olive/cli/base.py index 146929b6a..702fe14f5 100644 --- a/olive/cli/base.py +++ b/olive/cli/base.py @@ -409,7 +409,7 @@ def save_output_model(config: Dict, output_model_dir: Union[str, Path]): This assumes a single accelerator workflow. """ - run_output_path = Path(config["output_dir"]) / "output_model" + run_output_path = Path(config["output_dir"]) if not any(run_output_path.rglob("model_config.json")): # there must be an run_output_path with at least one model_config.json print("Command failed. 
Please set the log_level to 1 for more detailed logs.") diff --git a/olive/engine/footprint.py b/olive/engine/footprint.py index 258170cfc..0d462855a 100644 --- a/olive/engine/footprint.py +++ b/olive/engine/footprint.py @@ -434,6 +434,22 @@ def _plot_pareto_frontier(self, ranks=None, save_path=None, is_show=True, save_f def get_best_candidate_node( pf_footprints: Dict["AcceleratorSpec", Footprint], footprints: Dict["AcceleratorSpec", Footprint] ): + """Select the best candidate node from the pareto frontier footprints. + + This function evaluates nodes from the given pareto frontier footprints and selects the top-ranked node + based on specified objective metrics. It compares nodes from two dictionaries of footprints and + ranks them according to their metrics. + + Args: + pf_footprints (Dict["AcceleratorSpec", Footprint]): A dictionary mapping accelerator specifications + to their corresponding pareto frontier footprints, which contain nodes and their metrics. + footprints (Dict["AcceleratorSpec", Footprint"]): A dictionary mapping accelerator specifications + to their corresponding footprints, which contain nodes and their metrics. + + Returns: + Node: The top-ranked node based on the specified objective metrics. + + """ objective_dict = next(iter(pf_footprints.values())).objective_dict top_nodes = [] for accelerator_spec, pf_footprint in pf_footprints.items(): diff --git a/test/unit_test/cli/test_cli.py b/test/unit_test/cli/test_cli.py index 14fb4ad3d..213ef058c 100644 --- a/test/unit_test/cli/test_cli.py +++ b/test/unit_test/cli/test_cli.py @@ -113,7 +113,7 @@ def test_finetune_command(_, mock_tempdir, mock_run, tmp_path): # setup mock_tempdir.return_value = tmpdir.resolve() - workflow_output_dir = tmpdir / "output_model" + workflow_output_dir = tmpdir workflow_output_dir.mkdir(parents=True) dummy_output = workflow_output_dir / "model_config.json" with open(dummy_output, "w") as f: From ac96d5804248512a7daf946222505ca059bfb904 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Tue, 22 Oct 2024 22:57:44 +0000 Subject: [PATCH 4/9] update test --- test/unit_test/cli/test_cli.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/unit_test/cli/test_cli.py b/test/unit_test/cli/test_cli.py index 213ef058c..22fb31a0a 100644 --- a/test/unit_test/cli/test_cli.py +++ b/test/unit_test/cli/test_cli.py @@ -183,7 +183,7 @@ def test_capture_onnx_command(_, mock_tempdir, mock_run, use_model_builder, tmp_ # setup mock_tempdir.return_value = tmpdir.resolve() - workflow_output_dir = tmpdir / "output_model" + workflow_output_dir = tmpdir workflow_output_dir.mkdir(parents=True) dummy_output = workflow_output_dir / "model_config.json" with open(dummy_output, "w") as f: @@ -256,7 +256,7 @@ def test_quantize_command(mock_repo_exists, mock_tempdir, mock_run, algorithm_na mock_tempdir.return_value = tmpdir.resolve() mock_run.return_value = {} - workflow_output_dir = tmpdir / "output_model" / algorithm_name + workflow_output_dir = tmpdir / algorithm_name workflow_output_dir.mkdir(parents=True) model_config_path = workflow_output_dir / "model_config.json" with model_config_path.open("w") as f: From a5627bb07f5d4d5709bf16e2b8288723d7ea5137 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Wed, 23 Oct 2024 23:32:29 +0000 Subject: [PATCH 5/9] fix nit --- test/unit_test/cli/test_cli.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/unit_test/cli/test_cli.py b/test/unit_test/cli/test_cli.py index 22fb31a0a..bebffd528 100644 --- a/test/unit_test/cli/test_cli.py +++ b/test/unit_test/cli/test_cli.py 
@@ -114,7 +114,6 @@ def test_finetune_command(_, mock_tempdir, mock_run, tmp_path): # setup mock_tempdir.return_value = tmpdir.resolve() workflow_output_dir = tmpdir - workflow_output_dir.mkdir(parents=True) dummy_output = workflow_output_dir / "model_config.json" with open(dummy_output, "w") as f: json.dump({"dummy": "output"}, f) @@ -184,7 +183,6 @@ def test_capture_onnx_command(_, mock_tempdir, mock_run, use_model_builder, tmp_ # setup mock_tempdir.return_value = tmpdir.resolve() workflow_output_dir = tmpdir - workflow_output_dir.mkdir(parents=True) dummy_output = workflow_output_dir / "model_config.json" with open(dummy_output, "w") as f: json.dump({"config": {"inference_settings": {"dummy-key": "dummy-value"}}}, f) From 9007c08a5e1243f74a2a2bd076ec75270ddac93e Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Mon, 28 Oct 2024 21:56:42 +0000 Subject: [PATCH 6/9] fix nit --- test/unit_test/engine/test_engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit_test/engine/test_engine.py b/test/unit_test/engine/test_engine.py index cd57a7185..b228a913f 100644 --- a/test/unit_test/engine/test_engine.py +++ b/test/unit_test/engine/test_engine.py @@ -335,7 +335,7 @@ def test_run_output_model(self, search_strategy, tmp_path): } engine = Engine(**options) _, p_config = get_onnxconversion_pass(ignore_pass_config=False, target_opset=13) - engine.register(OnnxConversion, config=p_config, disable_search=True) + engine.register(OnnxConversion, config=p_config) # output model to output_dir output_dir = tmp_path / "output_dir" From 0c8d07e085911994fb7d775b8ed7027588859819 Mon Sep 17 00:00:00 2001 From: Xiaoyu Date: Sat, 16 Nov 2024 02:11:47 +0000 Subject: [PATCH 7/9] fix test --- test/unit_test/cli/test_cli.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/unit_test/cli/test_cli.py b/test/unit_test/cli/test_cli.py index 00dffab55..c3f8153aa 100644 --- a/test/unit_test/cli/test_cli.py +++ b/test/unit_test/cli/test_cli.py @@ -282,9 +282,7 @@ def test_quantize_command(mock_repo_exists, mock_tempdir, mock_run, algorithm_na mock_tempdir.return_value = tmpdir.resolve() mock_run.return_value = {} - workflow_output_dir = tmpdir / algorithm_name - workflow_output_dir.mkdir(parents=True) - model_config_path = workflow_output_dir / "model_config.json" + model_config_path = tmpdir / "model_config.json" with model_config_path.open("w") as f: f.write("{}") @@ -307,7 +305,7 @@ def test_quantize_command(mock_repo_exists, mock_tempdir, mock_run, algorithm_na config = mock_run.call_args[0][0] assert config["input_model"]["model_path"] == "dummy_model" - assert {el.name for el in output_dir.iterdir()} == {algorithm_name} + assert {el.name for el in output_dir.iterdir()} == {"model_config.json"} # TODO(anyone): Add tests for ManageAMLComputeCommand From be8af113f02bca72fcf6b01d41553ff274c6c462 Mon Sep 17 00:00:00 2001 From: Xiaoyu Zhang Date: Tue, 19 Nov 2024 14:11:51 -0800 Subject: [PATCH 8/9] Update folder structure commment --- olive/engine/engine.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/olive/engine/engine.py b/olive/engine/engine.py index 1f9c60c4b..d43d568e8 100644 --- a/olive/engine/engine.py +++ b/olive/engine/engine.py @@ -208,9 +208,11 @@ def run( output_dir/pareto_frontier_footprints.json: pareto frontier footprints output_dir/run_history.txt: run history output_dir/input_model_metrics.json: evaluation results of the input model + output_dir/...: output model files 2. 
Multiple accelerator specs: output_dir/{acclerator_spec}/...: Same as 1 but for each accelerator spec + output_dir/...: output model files No search mode: 1. One accelerator spec @@ -218,17 +220,15 @@ def run( output_dir/run_history.txt: run history output_dir/input_model_metrics.json: evaluation results of the input model output_dir/output_footprints.json: footprint of the output models + output_dir/...: output model files A. One pass flow: output_dir/metrics.json: evaluation results of the output model - output_dir/model_config.json: output model configuration output_dir/...: output model files - B. Multiple pass flows: - output_dir/{pass_flow}/...: Same as A but for each pass flow - 2. Multiple accelerator specs output_dir/{acclerator_spec}/...: Same as 1 but for each accelerator spec + output_dir/...: output model files """ if not accelerator_specs: From cd59774d1414124f09d0993aa6ba067b1e4cec4e Mon Sep 17 00:00:00 2001 From: Xiaoyu Zhang Date: Wed, 20 Nov 2024 23:16:39 -0800 Subject: [PATCH 9/9] Update cli saving output model logic --- olive/cli/base.py | 54 +++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/olive/cli/base.py b/olive/cli/base.py index 01346a0a7..4e2ee3c6f 100644 --- a/olive/cli/base.py +++ b/olive/cli/base.py @@ -14,7 +14,14 @@ from olive.cli.constants import CONDA_CONFIG from olive.common.user_module_loader import UserModuleLoader -from olive.common.utils import hardlink_copy_dir, hash_dict, hf_repo_exists, set_nested_dict_value, unescaped_str +from olive.common.utils import ( + hardlink_copy_dir, + hardlink_copy_file, + hash_dict, + hf_repo_exists, + set_nested_dict_value, + unescaped_str, +) from olive.hardware.accelerator import AcceleratorSpec from olive.hardware.constants import DEVICE_TO_EXECUTION_PROVIDERS from olive.resource_path import OLIVE_RESOURCE_ANNOTATIONS, find_all_resources @@ -417,35 +424,32 @@ def save_output_model(config: Dict, output_model_dir: Union[str, Path]): This assumes a single accelerator workflow. """ run_output_path = Path(config["output_dir"]) - if not any(run_output_path.rglob("model_config.json")): - # there must be an run_output_path with at least one model_config.json + model_config_path = run_output_path / "model_config.json" + if not model_config_path.exists(): print("Command failed. Please set the log_level to 1 for more detailed logs.") return output_model_dir = Path(output_model_dir).resolve() - # hardlink/copy the output model to the output_model_dir - hardlink_copy_dir(run_output_path, output_model_dir) - - # need to update the local path in the model_config.json - # should the path be relative or absolute? 
relative makes it easy to move the output
-    # around but the path needs to be updated when the model config is used
-    for model_config_file in output_model_dir.rglob("model_config.json"):
-        with model_config_file.open("r") as f:
-            model_config = json.load(f)
-
-        all_resources = find_all_resources(model_config)
-        for resource_key, resource_path in all_resources.items():
-            resource_path_str = resource_path.get_path()
-            if resource_path_str.startswith(str(run_output_path)):
-                set_nested_dict_value(
-                    model_config,
-                    resource_key,
-                    resource_path_str.replace(str(run_output_path), str(output_model_dir)),
-                )
-
-        with model_config_file.open("w") as f:
-            json.dump(model_config, f, indent=4)
+    with model_config_path.open("r") as f:
+        model_config = json.load(f)
+
+    all_resources = find_all_resources(model_config)
+    for resource_key, resource_path in all_resources.items():
+        src_path = Path(resource_path.get_path()).resolve()
+        if src_path.is_dir():
+            hardlink_copy_dir(src_path, output_model_dir / src_path.name)
+        else:
+            hardlink_copy_file(src_path, output_model_dir)
+
+        set_nested_dict_value(
+            model_config,
+            resource_key,
+            str(output_model_dir / src_path.name),
+        )
+    output_model_config_path = output_model_dir / "model_config.json"
+    with output_model_config_path.open("w") as f:
+        json.dump(model_config, f, indent=4)
 
     print(f"Command succeeded. Output model saved to {output_model_dir}")
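
For reference, a minimal sketch of how a downstream script could consume the flattened output layout these patches introduce. The directory name and the model_config.json keys ("type", "config", "model_path") are assumptions based on the usual Olive output shape, and metrics.json is only written when an evaluator is configured:

    # Sketch: read the output of an Olive run with the flat layout
    # (model_config.json and metrics.json directly under output_dir,
    # no intermediate "output_model" folder).
    import json
    from pathlib import Path

    output_dir = Path("output_dir")  # hypothetical run output directory

    with (output_dir / "model_config.json").open() as f:
        model_config = json.load(f)
    print("Output model type:", model_config.get("type"))
    print("Output model path:", model_config.get("config", {}).get("model_path"))

    metrics_path = output_dir / "metrics.json"
    if metrics_path.exists():  # present only when an evaluator was configured
        with metrics_path.open() as f:
            print("Output model metrics:", json.load(f))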