diff --git a/docs/benchmarks/language/reproducibility/indyscc24-bert.md b/docs/benchmarks/language/reproducibility/indyscc24-bert.md
index 86f4f285d..463d1a299 100644
--- a/docs/benchmarks/language/reproducibility/indyscc24-bert.md
+++ b/docs/benchmarks/language/reproducibility/indyscc24-bert.md
@@ -37,7 +37,6 @@ All the needed files are automatically pushed to the GitHub repository if you ma
 ```bash
 cm run script --tags=generate,inference,submission \
    --clean \
-   --preprocess_submission=yes \
    --run-checker \
    --tar=yes \
    --env.CM_TAR_OUTFILE=submission.tar.gz \
diff --git a/docs/benchmarks/text_to_image/reproducibility/scc24.md b/docs/benchmarks/text_to_image/reproducibility/scc24.md
index 6fe119b53..82a231b4d 100644
--- a/docs/benchmarks/text_to_image/reproducibility/scc24.md
+++ b/docs/benchmarks/text_to_image/reproducibility/scc24.md
@@ -59,10 +59,10 @@ or supporting multi-node execution) useful for the community and [MLCommons](htt
 ### Generate actual submission tree
+
 ```bash
 cm run script --tags=generate,inference,submission \
    --clean \
-   --preprocess_submission=yes \
    --run-checker \
    --tar=yes \
    --env.CM_TAR_OUTFILE=submission.tar.gz \
diff --git a/docs/img/submission-flow.png b/docs/img/submission-flow.png
new file mode 100644
index 000000000..33a09a535
Binary files /dev/null and b/docs/img/submission-flow.png differ
diff --git a/docs/submission/index.md b/docs/submission/index.md
index adcd3df53..c99802420 100644
--- a/docs/submission/index.md
+++ b/docs/submission/index.md
@@ -2,7 +2,16 @@ hide:
   - toc
 ---
-[![Streamline your MLPerf results using CM Framework](https://img.youtube.com/vi/eI1Hoecc3ho/0.jpg)](https://youtu.be/eI1Hoecc3ho)
+
+![MLPerf Inference Submission Generation Flow](../img/submission-flow.png)
+
+Figure: MLPerf Inference Submission Generation Flow
+
+Click [here](https://youtu.be/eI1Hoecc3ho) to view the recording of the workshop: Streamlining your MLPerf Inference results using CM.

 === "CM based benchmark"
     If you have followed the `cm run` commands under the individual model pages in the [benchmarks](../index.md) directory, all the valid results will get aggregated to the `cm cache` folder. The following command could be used to browse the structure of inference results folder generated by CM.
diff --git a/main.py b/main.py
index a0a0ec99e..c8c64b8c3 100755
--- a/main.py
+++ b/main.py
@@ -122,6 +122,7 @@ def mlperf_inference_implementation_readme(
     if not categories:
         if model.lower() == "bert-99.9":
             categories = ["Datacenter"]
+
         elif (
             "dlrm" in model.lower()
             or "llama2" in model.lower()
@@ -148,7 +149,7 @@ def mlperf_inference_implementation_readme(
             scenarios = [
                 scenario for scenario in scenarios if scenario in fixed_scenarios]

-        content += f'{pre_space}=== "{category.lower()}"\n\n'
+        content += f"{pre_space}=== \"{category.lower()}\"\n\n"
         cur_space = pre_space + "    "
         scenarios_string = ", ".join(scenarios)
@@ -173,6 +174,7 @@ def mlperf_inference_implementation_readme(
             # minimum system requirements
             content += get_min_system_requirements(
+
                 cur_space2, model, implementation, device
             )
@@ -235,6 +237,11 @@ def mlperf_inference_implementation_readme(
                         extra_docker_input_string,
                     )
+                    common_info = get_common_info(
+                        spaces + 16,
+                        implementation
+                    )
+
                     if (
                         execution_env == "Native"
                     ):  # Native implementation steps through virtual environment
@@ -242,6 +249,8 @@ def mlperf_inference_implementation_readme(
                         content += get_venv_command(spaces + 16)
                         content += f"{cur_space3}####### Performance Estimation for Offline Scenario\n"
+                        content += common_info
+
                         content += setup_run_cmd.replace(
                             "--docker ", "")
@@ -256,6 +265,9 @@ def mlperf_inference_implementation_readme(
                             device,
                             setup_tips,
                         )
+
+                        content += common_info
+
                         content += docker_info
                         content += setup_run_cmd
@@ -373,7 +385,8 @@ def mlperf_inference_implementation_readme(
                 extra_input_string,
             )
             content += run_cmd
-            content += run_suffix
+
+            content += run_suffix
     readme_prefix = get_readme_prefix(
         spaces, model, implementation, extra_variation_tags
     )
@@ -473,6 +486,24 @@ def get_venv_command(spaces):
 {pre_space}export CM_SCRIPT_EXTRA_CMD=\"--adr.python.name=mlperf\"
 {pre_space}```\n"""
+# contains run command information which is common to both docker and
+# native runs
+def get_common_info(spaces, implementation):
+    info = ""
+    pre_space = ""
+    for i in range(1, spaces):
+        pre_space = pre_space + " "
+    pre_space += " "
+    # pre_space = " "
+    info += f"\n{pre_space}!!! tip\n\n"
+    info += f"{pre_space}    - Batch size could be adjusted using `--batch_size=#`, where `#` is the desired batch size. This option works only if the implementation in use is supporting the given batch size.\n\n"
+    if implementation.lower() == "reference":
+        info += f"{pre_space}    - Add `--adr.mlperf-implementation.tags=_branch.master,_repo.