diff --git a/docs/benchmarks/text_to_image/reproducibility/scc24.md b/docs/benchmarks/text_to_image/reproducibility/scc24.md index 4f66097a6..7f7617678 100644 --- a/docs/benchmarks/text_to_image/reproducibility/scc24.md +++ b/docs/benchmarks/text_to_image/reproducibility/scc24.md @@ -48,10 +48,45 @@ or supporting multi-node execution) useful for the community and [MLCommons](htt === "MLCommons-Python" ## MLPerf Reference Implementation in Python -{{ mlperf_inference_implementation_readme (4, "sdxl", "reference", extra_variation_tags=",_short", scenarios=["Offline"],categories=["Edge"], setup_tips=False) }} +{{ mlperf_inference_implementation_readme (4, "sdxl", "reference", extra_variation_tags=",_short,_scc24-base", devices=["ROCm", "CUDA"],scenarios=["Offline"],categories=["Edge"], setup_tips=False) }} === "Nvidia" ## Nvidia MLPerf Implementation {{ mlperf_inference_implementation_readme (4, "sdxl", "nvidia", extra_variation_tags=",_short", scenarios=["Offline"],categories=["Edge"], setup_tips=False, implementation_tips=False) }} +## Submission Commands +### Generate actual submission tree + +```bash + cm run script --tags=generate,inference,submission \ + --clean \ + --preprocess_submission=yes \ + --run-checker \ + --submitter= \ + --tar=yes \ + --env.CM_TAR_OUTFILE=submission.tar.gz \ + --division=open \ + --category=datacenter \ + --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes \ + --quiet +``` + +* Use `--hw_name="My system name"` to give a meaningful system name. 
Examples can be seen [here](https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning/systems) + +* Use the `--hw_notes_extra` option to add additional notes, e.g. `--hw_notes_extra="Result taken by NAME"` + + diff --git a/main.py b/main.py index 4e9a93d9a..be6ae49ee 100755 --- a/main.py +++ b/main.py @@ -417,8 +417,10 @@ def mlperf_inference_run_command(spaces, model, implementation, framework, categ if docker: docker_cmd_suffix = f" \\\n{pre_space} --docker --quiet" - docker_cmd_suffix += f" \\\n{pre_space} --test_query_count={test_query_count} {extra_docker_input_string} {extra_input_string}" - + if "scc24" not in extra_variation_tags: + docker_cmd_suffix += f" \\\n{pre_space} --test_query_count={test_query_count} {extra_docker_input_string} {extra_input_string}" + if extra_docker_input_string != "" or extra_input_string != "": + docker_cmd_suffix += f" \\\n{pre_space} {extra_docker_input_string} {extra_input_string}" if "bert" in model.lower() and framework == "deepsparse": docker_cmd_suffix += f"\\\n{pre_space} --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=zoo:nlp/question_answering/mobilebert-none/pytorch/huggingface/squad/base_quant-none" if "llama2-70b" in model.lower(): @@ -454,7 +456,7 @@ def mlperf_inference_run_command(spaces, model, implementation, framework, categ else: cmd_suffix = f"\\\n{pre_space} --quiet {extra_input_string}" - if execution_mode == "test" and test_query_count > 0: + if execution_mode == "test" and test_query_count > 0 and "scc24" not in extra_variation_tags: cmd_suffix += f" \\\n {pre_space} --test_query_count={test_query_count}" if "bert" in model.lower() and framework == "deepsparse":