diff --git a/.github/workflows/test-cm-tutorial-tvm-pip.yml b/.github/workflows/test-cm-tutorial-tvm-pip.yml
index 1919583f5..89a6d1d66 100644
--- a/.github/workflows/test-cm-tutorial-tvm-pip.yml
+++ b/.github/workflows/test-cm-tutorial-tvm-pip.yml
@@ -5,7 +5,7 @@ name: CM tutorial tvm pip install
 on:
   pull_request:
-    branches: [ "main", "test" ]
+    branches: [ "main", "test", "mlperf-inference" ]
     paths:
       - '.github/workflows/test-cm-tutorial-tvm-pip.yml'
      - '**'
diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml
index 60e1f0d4a..bb5ab71b2 100644
--- a/.github/workflows/test-mlperf-inference-abtf-poc.yml
+++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml
@@ -37,7 +37,7 @@ jobs:
         cm pull repo mlcommons@cm4abtf --branch=poc
     - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on docker
       run: |
-        cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=5 --adr.compiler.tags=gcc --quiet -v
+        cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=5 --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v

   build2:
     runs-on: ${{ matrix.os }}
@@ -62,7 +62,7 @@ jobs:
         cm pull repo mlcommons@cm4abtf --branch=poc
     - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }}
       run: |
-        cm run script --tags=run-abtf,inference,_poc-demo --adr.compiler.tags=gcc --quiet -v
+        cm run script --tags=run-abtf,inference,_poc-demo --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v

   build3:
     runs-on: ${{ matrix.os }}
@@ -89,4 +89,4 @@ jobs:
         cm pull repo mlcommons@cm4abtf --branch=poc
     - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }}
       run: |
-        cm run script --tags=run-abtf,inference,_poc-demo --quiet --env.CM_MLPERF_LOADGEN_BUILD_FROM_SRC=off -v
+        cm run script --tags=run-abtf,inference,_poc-demo --quiet --env.CM_MLPERF_LOADGEN_BUILD_FROM_SRC=off --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 -v
diff --git a/automation/script/module.py b/automation/script/module.py
index 534bd3009..fbb400f89 100644
--- a/automation/script/module.py
+++ b/automation/script/module.py
@@ -2340,7 +2340,9 @@ def search(self, i):
         # Print filtered paths if console
         if console:
             for script in r['list']:
-                logging.info(script.path)
+# This should not be logging since the output can be consumed by other external tools and scripts
+#                logging.info(script.path)
+                print (script.path)

         # Finalize output
         r['script_tags'] = script_tags
@@ -2355,7 +2357,7 @@ def test(self, i):
           Test automation (TBD)

           Args:
-            (CM input dict): 
+            (CM input dict):

             (out) (str): if 'con', output to console
@@ -2641,8 +2643,7 @@ def add(self, i):
             if k in ii: del ii[k]

         if artifact_repo != None:
-            artifact = ii.get('artifact','')
-            ii['artifact'] = utils.assemble_cm_object2(artifact_repo) + ':' + artifact
+            ii['artifact'] = utils.assemble_cm_object2(artifact_repo) + ':' + utils.assemble_cm_object2(artifact_repo)

         r_obj=self.cmind.access(ii)
         if r_obj['return']>0: return r_obj
diff --git a/script/app-mlperf-inference-amd/customize.py b/script/app-mlperf-inference-amd/customize.py
index 26a781a45..87819e2e2 100644
--- a/script/app-mlperf-inference-amd/customize.py
+++ b/script/app-mlperf-inference-amd/customize.py
@@ -13,6 +13,9 @@ def preprocess(i):
     if env.get('CM_MLPERF_SKIP_RUN', '') == "yes":
         return {'return':0}

+    env['CM_MLPERF_AMD_SCRIPT_PATH'] = env['CM_TMP_CURRENT_SCRIPT_PATH']
+    env['CM_MLPERF_AMD_CODE_PATH'] = os.path.join(env['CM_MLPERF_INFERENCE_IMPLEMENTATION_REPO'], "closed", "AMD")
+
     if 'CM_MODEL' not in env:
         return {'return': 1, 'error': 'Please select a variation specifying the model to run'}
     if 'CM_MLPERF_BACKEND' not in env:
@@ -22,6 +25,7 @@ def preprocess(i):

     if "llama2" in env['CM_MODEL']:
         env['CM_RUN_DIR'] = i['run_script_input']['path']
+        env['CM_MLPERF_AMD_LLAMA2_CODE_PATH'] = os.path.join(env['CM_MLPERF_AMD_CODE_PATH'], "llama2-70b-99.9/VllmFp8")
         env['CM_RUN_CMD'] = "bash run-llama2.sh "
     else:
         return {'return':1, 'error':'Model {} not supported'.format(env['CM_MODEL'])}
diff --git a/script/app-mlperf-inference-amd/run-llama2.sh b/script/app-mlperf-inference-amd/run-llama2.sh
new file mode 100644
index 000000000..4692bfcc4
--- /dev/null
+++ b/script/app-mlperf-inference-amd/run-llama2.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -xeu
+
+N_SAMPLES=${N_SAMPLES:-24576} #24576 #3072 #2457 #6
+TP=1
+DP=${DP:-8}
+WD=${WD:-0}
+SORTING=${SORTING:-descending} #ascending #descending #lexicographic #skip
+
+export HIP_FORCE_DEV_KERNARG=1
+export VLLM_USE_TRITON_FLASH_ATTN=0
+export VLLM_FP8_PADDING=1
+export VLLM_FP8_ACT_PADDING=1
+export VLLM_FP8_WEIGHT_PADDING=1
+export VLLM_FP8_REDUCE_CONV=1
+export VLLM_SCHED_PREFILL_KVC_FREEPCT=31.0
+
+export HARNESS_DISABLE_VLLM_LOGS=1
+export VLLM_LOGGING_LEVEL=ERROR
+
+MODEL_PATH=${CM_ML_MODEL_LLAMA2_FILE_WITH_PATH:-/data/llm/llama2-70b-chat/}
+DATASET_PATH=${CM_DATASET_OPENORCA_PREPROCESSED_PATH:-/data/open_orca/open_orca_gpt4_tokenized_llama.sampled_24576.pkl.gz}
+QUANTIZED_WEIGHTS_PATH=${CM_LLAMA2_FINAL_SAFE_TENSORS_PATH:-quantized/quark_share/modelzoo/llama2_70b_wfp8_afp8_ofp8_nomerge/json-safetensors/llama.safetensors}
+QUANTIZATION_PARAM_PATH=${QUANTIZATION_PARAM_PATH:-/app/kv_cache_scales.json}
+
+MLPERF_CONF="${CM_MLPERF_CONF:-/app/mlperf_inference/mlperf.conf}"
+USER_CONF="${CM_MLPERF_USER_CONF:-/lab-mlperf-inference/code/llama2-70b-99.9/mlperf_config_VllmFp8/user.conf}"
+
+SUBMISSION=${SUBMISSION:-0}
+
+LOG_DIR=${CM_MLPERF_OUTPUT_DIR}
+
+cp $USER_CONF ${LOG_DIR}/user.conf
+
+cmd="${CM_PYTHON_BIN_WITH_PATH} ${CM_MLPERF_AMD_LLAMA2_CODE_PATH}/mainVllmFp8_Offline.py \
+    --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+    --output-log-dir ${LOG_DIR} \
+    --model-path $MODEL_PATH \
+    --mlperf-conf $MLPERF_CONF \
+    --user-conf $USER_CONF \
+    --total-sample-count $N_SAMPLES \
+    --dataset-path $DATASET_PATH \
+    --dtype float16 \
+    --backend vllm \
+    --device cuda:0 \
+    --kv-cache-dtype fp8 \
+    -tp ${TP} \
+    -dp ${DP} \
+    --quantization fp8 \
+    --quantized-weights-path ${QUANTIZED_WEIGHTS_PATH} \
+    --quantization-param-path ${QUANTIZATION_PARAM_PATH} \
+    --warmup-duration ${WD} \
+    --sorting ${SORTING} \
+    --enforce-eager True \
+    --gpu-memory-utilization 0.99"
diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index cd55d9726..c87f8a50d 100644
--- a/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -780,6 +780,9 @@ variations:
         CM_MLPERF_BACKEND_VERSION: <<>>
       deps:
       - tags: get,generic-python-lib,_onnx
+      - tags: get,generic-python-lib,_numpy
+        version_max: "1.26.4"
+        version_max_usable: "1.26.4"
       - tags: get,tvm
         names:
         - tvm
diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml
index 21f1515f0..21abbb3b2 100644
--- a/script/app-mlperf-inference-nvidia/_cm.yaml
+++ b/script/app-mlperf-inference-nvidia/_cm.yaml
@@ -316,9 +316,16 @@ post_deps:
 # Variations to customize dependencies
 variations:
   # MLPerf inference version
-  v4.0:
+  v4.1:
     group: version
     default: true
+    env:
+      CM_MLPERF_INFERENCE_CODE_VERSION: "v4.1"
+    adr:
+      pytorch:
+        tags: _for-nvidia-mlperf-inference-v4.1
+  v4.0:
+    group: version
     env:
       CM_MLPERF_INFERENCE_CODE_VERSION: "v4.0"
       CM_MLPERF_GPTJ_MODEL_FP8_PATH_SUFFIX: GPTJ-FP8-quantized
@@ -455,6 +462,14 @@ variations:
         - scipy
         version: 1.10.1

+  sdxl,v4.1:
+    deps:
+      - tags: get,generic-python-lib,_package.torchrec
+        version: 0.4.0
+      - tags: get,generic-python-lib,_package.torchmetrics
+        version: 1.0.3
+      - tags: get,generic-python-lib,_package.typeguard
+
   bert_:
     deps:
       - tags: get,generic-python-lib,_transformers
diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index 6d44c6066..9dc43aa62 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -307,7 +307,7 @@ variations:

   nvidia-original,r4.1_default:
     docker:
-      base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.0-cuda12.2-cudnn8.9-x86_64-ubuntu20.04-public
+      base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v4.1-cuda12.4-pytorch24.04-ubuntu22.04-x86_64-release

   nvidia-original,r4.1_default,gptj_:
     docker:
@@ -349,6 +349,8 @@ variations:
       os_version: "20.04"
     deps:
     - tags: get,mlperf,inference,nvidia,scratch,space
+      names:
+      - mlperf-inference-nvidia-scratch-space
     - tags: get,nvidia-docker
       skip_if_env:
         CM_SKIP_GET_NVIDIA_DOCKER:
@@ -1114,6 +1116,9 @@ variations:
       all_gpus: 'yes'
     deps:
     - tags: get,nvidia-docker
+      skip_if_env:
+        CM_SKIP_GET_NVIDIA_DOCKER:
+        - yes

     group: device
     env:
@@ -1415,19 +1420,19 @@ variations:
       reproducibility
     add_deps_recursive:
       nvidia-inference-common-code:
-        version: r4.0
+        version: r4.1
         tags: _go
       nvidia-inference-server:
-        version: r4.0
+        version: r4.1
         tags: _go
       intel-harness:
-        tags: _v4.0
+        tags: _v4.1
     default_env:
       CM_SKIP_SYS_UTILS: 'yes'
       CM_REGENERATE_MEASURE_FILES: 'yes'
     env:
       CM_ENV_NVMITTEN_DOCKER_WHEEL_PATH: '/opt/nvmitten-0.1.3b0-cp38-cp38-linux_x86_64.whl'
-
+      CM_MLPERF_INFERENCE_VERSION: '4.1'

 invalid_variation_combinations:
   -
@@ -1523,7 +1528,11 @@ docker:
   use_host_user_id: True
   deps:
     - tags: get,mlperf,inference,results,dir,local
+      names:
+      - get-mlperf-inference-results-dir
     - tags: get,mlperf,inference,submission,dir,local
+      names:
+      - get-mlperf-inference-submission-dir
   pre_run_cmds:
     #- cm pull repo && cm run script --tags=get,git,repo,_repo.https://github.com/GATEOverflow/inference_results_v4.0.git --update
     - cm pull repo
@@ -1536,6 +1545,7 @@ docker:
     - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}"
     - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2"
     - "${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}"
+    - "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}"
   skip_run_cmd: 'no'
   shm_size: '32gb'
   interactive: True
diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py
index f478c6d67..c1fe19682 100644
--- a/script/app-mlperf-inference/customize.py
+++ b/script/app-mlperf-inference/customize.py
@@ -206,7 +206,7 @@ def postprocess(i):
         cm_sut_info['device'] = env['CM_MLPERF_DEVICE']
         cm_sut_info['framework'] = state['CM_SUT_META']['framework']
         cm_sut_info['run_config'] = env['CM_MLPERF_INFERENCE_SUT_RUN_CONFIG']
-        with open(os.path.join(result_sut_folder_path,"cm_sut_info.json"), "w") as fp:
+        with open(os.path.join(result_sut_folder_path,"cm-sut-info.json"), "w") as fp:
             json.dump(cm_sut_info, fp, indent=2)

     system_meta = state['CM_SUT_META']
diff --git a/script/benchmark-program/customize.py b/script/benchmark-program/customize.py
index 49f77ad70..af8c73a32 100644
--- a/script/benchmark-program/customize.py
+++ b/script/benchmark-program/customize.py
@@ -81,7 +81,7 @@ def preprocess(i):
     # generate the post run cmd - for killing the process that records runtime system infos
     post_run_cmd = ""
     if env.get('CM_PROFILE_NVIDIA_POWER', '') == "on":
-        post_run_cmd += "echo 'killing process \${cmd_pid}' && kill -TERM \${cmd_pid}"
+        post_run_cmd += "echo killing process \$cmd_pid && kill -TERM \${cmd_pid}"
         print(f"Post run command for killing the process that measures the runtime system information: {post_run_cmd}")

     env['CM_POST_RUN_CMD'] = post_run_cmd
diff --git a/script/benchmark-program/run.sh b/script/benchmark-program/run.sh
old mode 100644
new mode 100755
index e8946905b..cb4eb9204
--- a/script/benchmark-program/run.sh
+++ b/script/benchmark-program/run.sh
@@ -1,4 +1,20 @@
 #!/bin/bash
+
+# function to safely exit the background process
+safe_exit() {
+  if [[ "${CM_POST_RUN_CMD}" != "" ]]; then
+    eval ${CM_POST_RUN_CMD}
+    if [ $? -eq 0 ]; then
+      exit 0
+    else
+      exit $?
+    fi
+  fi
+}
+
+# trap signals to redirect the execution flow to safe_exit
+trap safe_exit SIGINT SIGTERM
+
 if [[ ${CM_MLPERF_POWER} == "yes" && ${CM_MLPERF_LOADGEN_MODE} == "performance" ]]; then
   exit 0
 fi
@@ -45,18 +61,10 @@ eval ${CM_PRE_RUN_CMD}
 if [[ "${CM_RUN_CMD0}" != "" ]]; then
   eval ${CM_RUN_CMD0}
   exitstatus=$?
-  if [ -e exitstatus ]; then
-    exitstatus=$( cat exitstatus )
-  fi
-  test $exitstatus -eq 0 || $exitstatus
 else
   echo "${CM_RUN_CMD}"
   eval ${CM_RUN_CMD}
   exitstatus=$?
-  if [ -e exitstatus ]; then
-    exitstatus=$( cat exitstatus )
-  fi
-  test $exitstatus -eq 0 || $exitstatus
 fi

 eval ${CM_POST_RUN_CMD}
diff --git a/script/build-mlperf-inference-server-nvidia/_cm.yaml b/script/build-mlperf-inference-server-nvidia/_cm.yaml
index 460a86394..74ce30e2f 100644
--- a/script/build-mlperf-inference-server-nvidia/_cm.yaml
+++ b/script/build-mlperf-inference-server-nvidia/_cm.yaml
@@ -218,7 +218,6 @@ versions:
        version: r3.0
      nvidia-scratch-space:
        tags: _version.3_0
-
  r3.1:
    add_deps_recursive:
      nvidia-inference-common-code:
        version: r3.1
@@ -237,6 +236,26 @@ versions:
      - tags: install,nccl,libs,_cuda

  r4.0:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r4.0
+      nvidia-scratch-space:
+        tags: _version.4_0
+    default_env:
+      BUILD_TRTLLM: 1
+    deps:
+      - tags: get,generic,sys-util,_nlohmann-json3-dev
+      - tags: get,generic,sys-util,_git-lfs
+      - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0
+        names:
+        - pytorch
+        - torch
+      - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0
+        names:
+        - pytorchvision
+        - torchvision
+
+  r4.1-dev:
    add_deps_recursive:
      nvidia-inference-common-code:
        version: r4.0
      nvidia-scratch-space:
        tags: _version.4_0
    default_env:
      BUILD_TRTLLM: 1
    deps:
      - tags: get,generic,sys-util,_nlohmann-json3-dev
      - tags: get,generic,sys-util,_git-lfs
      - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v4.0
        names:
        - pytorch
        - torch
      - tags: install,torchvision,from.src,_for-nvidia-mlperf-inference-v4.0
        names:
        - pytorchvision
        - torchvision
+
+  r4.1:
+    add_deps_recursive:
+      nvidia-inference-common-code:
+        version: r4.1
+      nvidia-scratch-space:
+        tags: _version.4_1
+    default_env:
+      BUILD_TRTLLM: 1

 docker:
  skip_run_cmd: 'no'
diff --git a/script/build-mlperf-inference-server-nvidia/customize.py b/script/build-mlperf-inference-server-nvidia/customize.py
index e540beb0b..47338aed9 100644
--- a/script/build-mlperf-inference-server-nvidia/customize.py
+++ b/script/build-mlperf-inference-server-nvidia/customize.py
@@ -21,7 +21,8 @@ def preprocess(i):
     if env.get('CM_GCC_VERSION', '') != '':
         gcc_major_version = env['CM_GCC_VERSION'].split(".")[0]
         if int(gcc_major_version) > 10:
-            cxxflags.append("-Wno-error=range-loop-construct")
+            if env.get('CM_MLPERF_INFERENCE_VERSION','') != "4.1":
+                cxxflags.append("-Wno-error=range-loop-construct")

     if env.get('CM_MLPERF_DEVICE','') == "inferentia":
         env['USE_INFERENTIA'] = "1"
diff --git a/script/download-and-extract/_cm.json b/script/download-and-extract/_cm.json
index 143a5c9bf..b6e4dfb0e 100644
--- a/script/download-and-extract/_cm.json
+++ b/script/download-and-extract/_cm.json
@@ -76,7 +76,7 @@
   "uid": "c67e81a4ce2649f5",
   "variations": {
     "cmutil": {
-      "add_deps_recursive": {
+      "add_deps": {
         "download-script": {
           "tags": "_cmutil"
         }
@@ -85,7 +85,7 @@
       "group": "download-tool"
     },
     "curl": {
-      "add_deps_recursive": {
+      "add_deps": {
        "download-script": {
          "tags": "_wget"
        }
@@ -98,7 +98,7 @@
      }
    },
    "rclone": {
-      "add_deps_recursive": {
+      "add_deps": {
        "download-script": {
          "tags": "_rclone"
        }
@@ -106,7 +106,7 @@
      "group": "download-tool"
    },
    "gdown": {
-      "add_deps_recursive": {
+      "add_deps": {
        "download-script": {
          "tags": "_gdown"
        }
@@ -154,7 +154,7 @@
      }
    },
    "wget": {
-      "add_deps_recursive": {
+      "add_deps": {
        "download-script": {
          "tags": "_wget"
        }
diff --git a/script/download-file/customize.py b/script/download-file/customize.py
index 4c84f6048..31116579b 100644
--- a/script/download-file/customize.py
+++ b/script/download-file/customize.py
@@ -137,12 +137,12 @@ def preprocess(i):
             env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath

     x='*' if os_info['platform'] == 'windows' else ''
-    x_c='s' if os_info['platform'] == 'darwin' else ''
+    x_c=' -s ' if os_info['platform'] == 'darwin_off' else '' #not using this option for now

     #verify checksum if file already present
     if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '':
         env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
     elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
-        env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum -c{} ".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
+        env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {}-c -".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
     else:
         env['CM_DOWNLOAD_CHECKSUM_CMD'] = ""
diff --git a/script/download-file/run.sh b/script/download-file/run.sh
index ef4e407b0..91f5428c9 100644
--- a/script/download-file/run.sh
+++ b/script/download-file/run.sh
@@ -16,9 +16,8 @@ fi

 if [[ ${CM_DOWNLOAD_TOOL} == "cmutil" ]]; then
   require_download=0
-fi

-if [ -e "${CM_DOWNLOAD_DOWNLOADED_PATH}" ]; then
+elif [ -e "${CM_DOWNLOAD_DOWNLOADED_PATH}" ]; then
   if [[ "${CM_DOWNLOAD_CHECKSUM_CMD}" != "" ]]; then
     echo ""
     echo "${CM_DOWNLOAD_CHECKSUM_CMD}"
@@ -51,5 +50,4 @@ if [[ ${require_download} == "1" ]]; then
     test $? -eq 0 || exit $?
   fi
 fi
-
 test $? -eq 0 || exit $?
diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py
index af769aecf..3fba68636 100644
--- a/script/extract-file/customize.py
+++ b/script/extract-file/customize.py
@@ -42,7 +42,7 @@ def preprocess(i):
     # By default remove archive after extraction
     remove_extracted = False if env.get('CM_EXTRACT_REMOVE_EXTRACTED','').lower() == 'no' else True

-    if filename.endswith(".zip"):
+    if filename.endswith(".zip") or filename.endswith(".pth"):
         env['CM_EXTRACT_TOOL'] = "unzip"
     elif filename.endswith(".tar.gz"):
         if windows:
@@ -51,6 +51,9 @@ def preprocess(i):
             filename = filename[:-3] # leave only .tar
             env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvf'
             env['CM_EXTRACT_TOOL'] = 'tar '
+        elif os_info['platform'] == 'darwin':
+            env['CM_EXTRACT_TOOL_OPTIONS'] = ' -xvzf '
+            env['CM_EXTRACT_TOOL'] = 'tar '
         else:
             env['CM_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf '
             env['CM_EXTRACT_TOOL'] = 'tar '
diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py
index c5812db65..cdcf1e7f5 100644
--- a/script/generate-mlperf-inference-submission/customize.py
+++ b/script/generate-mlperf-inference-submission/customize.py
@@ -144,12 +144,12 @@ def generate_submission(i):

         # check whether the root folder contains the sut infos
         # if yes then there is no need to check for meta files inside individual model folders
-        if "cm_sut_info.json" in os.listdir(result_path):
-            sut_info = fill_from_json(os.path.join(result_path, "cm_sut_info.json"), sut_info.keys(), sut_info)
+        if "cm-sut-info.json" in os.listdir(result_path):
+            sut_info = fill_from_json(os.path.join(result_path, "cm-sut-info.json"), sut_info.keys(), sut_info)
             if sut_info == -1:
-                return {'return':1, 'error':f"key value mismatch. Refer the populating dictionary:\n{sut_info}\n and file {os.path.join(result_path, 'cm_sut_info.json')}"}
+                return {'return':1, 'error':f"key value mismatch. Refer the populating dictionary:\n{sut_info}\n and file {os.path.join(result_path, 'cm-sut-info.json')}"}
         if check_dict_filled(sut_info.keys(), sut_info):
-            print(f"sut info completely filled from {os.path.join(result_path, 'cm_sut_info.json')}!")
+            print(f"sut info completely filled from {os.path.join(result_path, 'cm-sut-info.json')}!")

         # Check whether the root folder contains the model mapping file
         # expects json file in the format:
@@ -216,7 +216,7 @@ def generate_submission(i):
                 system_meta_default['framework'] = framework + " " + framework_version
         else:
             print(parts)
-            return {'return': 1, 'error': f"The required details for generating the inference submission:\n1.system_name\n2.implementation\n3.framework\n4.run_config\nInclude a cm_sut_info.json file with the above content in {result_path}"}
+            return {'return': 1, 'error': f"The required details for generating the inference submission:\n1.system_name\n2.implementation\n3.framework\n4.run_config\nInclude a cm-sut-info.json file with the above content in {result_path}"}

         platform_prefix = inp.get('platform_prefix', '')
         if platform_prefix:
@@ -276,6 +276,12 @@ def generate_submission(i):
             modes = [f for f in os.listdir(result_scenario_path) if not os.path.isfile(os.path.join(result_scenario_path, f))]

             power_run = False
+
+            # we check for the existence of mlperf_log_summary.txt and mlperf_log_detail.txt to consider a result folder as valid. Rest of the checks are done later by the submission checker
+            files_to_check = [ "mlperf_log_summary.txt", "mlperf_log_detail.txt" ]
+            if not all([os.path.exists(os.path.join(result_scenario_path, "performance", "run_1", f)) for f in files_to_check]):
+                continue
+
             for mode in modes:
                 result_mode_path = os.path.join(result_scenario_path, mode)
                 submission_mode_path = os.path.join(submission_scenario_path, mode)
@@ -346,6 +352,12 @@ def generate_submission(i):
                 if os.path.exists(user_conf_path):
                     shutil.copy(user_conf_path, os.path.join(submission_measurement_path, 'user.conf'))
                 measurements_json_path = os.path.join(result_mode_path, "measurements.json")
+                # get model precision
+                model_precision = "fp32"
+                if os.path.exists(measurements_json_path):
+                    with open(measurements_json_path, "r") as f:
+                        measurements_json = json.load(f)
+                        model_precision = measurements_json.get("weight_data_types", "fp32")
                 if os.path.exists(user_conf_path):
                     shutil.copy(measurements_json_path, os.path.join(submission_measurement_path, sub_res+'.json'))
                 files = []
@@ -400,7 +412,7 @@ def generate_submission(i):
                         f.write("TBD") #create an empty README
                 else:
                     readme_suffix = ""
-                    result_string, result = mlperf_utils.get_result_string(env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file)
+                    result_string, result = mlperf_utils.get_result_string(env['CM_MLPERF_LAST_RELEASE'], model, scenario, result_scenario_path, power_run, sub_res, division, system_file, model_precision)

                     for key in result:
                         results[model][scenario][key] = result[key]
diff --git a/script/generate-mlperf-inference-submission/sample-cm-sut-info.json b/script/generate-mlperf-inference-submission/sample-cm-sut-info.json
new file mode 100644
index 000000000..3cc137038
--- /dev/null
+++ b/script/generate-mlperf-inference-submission/sample-cm-sut-info.json
@@ -0,0 +1,7 @@
+{
+  "hardware_name": "VivoBook-ASUSLaptop-X515UA-M515UA",
+  "implementation": "reference",
+  "device": "cpu",
+  "framework": "pytorch_v2.4.0",
+  "run_config": "default"
+}
\ No newline at end of file
diff --git a/script/get-dataset-cifar10/requirements.txt b/script/get-dataset-cifar10/requirements.txt
index bf9d458b1..530995dd0 100644
--- a/script/get-dataset-cifar10/requirements.txt
+++ b/script/get-dataset-cifar10/requirements.txt
@@ -1,47 +1,47 @@
-absl-py==0.11.0
-astunparse==1.6.3
-cachetools==4.2.1
-certifi==2020.12.5
-chardet==4.0.0
-cycler==0.10.0
-flatbuffers==1.12
-gast==0.4.0
-google-auth==1.27.0
-google-auth-oauthlib==0.4.2
-google-pasta==0.2.0
-grpcio==1.34.0
-h5py==3.1.0
-idna==2.10
-imageio==2.9.0
-joblib==1.1.1
-Keras-Preprocessing==1.1.2
-kiwisolver==1.3.1
-Markdown==3.3.3
-matplotlib==3.3.4
-numpy==1.19.5
-oauthlib==3.1.0
-opencv-python==4.5.1.48
-opt-einsum==3.3.0
-Pillow==8.1.0
-protobuf==3.14.0
-pyasn1==0.4.8
-pyasn1-modules==0.2.8
-pyparsing==2.4.7
-python-dateutil==2.8.1
-PyYAML==5.4.1
-requests==2.25.1
-requests-oauthlib==1.3.0
-rsa==4.7.1
-scikit-learn==0.24.1
-scipy==1.6.0
-six==1.15.0
-tensorboard==2.5.0
-tensorboard-plugin-wit==1.8.0
-tensorflow==2.5.0
-tensorflow-estimator==2.5.0
-termcolor==1.1.0
-threadpoolctl==2.1.0
-typing-extensions==3.7.4.3
-urllib3==1.26.3
-Werkzeug==1.0.1
-wrapt==1.12.1
+absl-py
+astunparse
+cachetools
+certifi
+chardet
+cycler
+flatbuffers
+gast
+google-auth
+google-auth-oauthlib
+google-pasta
+grpcio
+h5py
+idna
+imageio
+joblib
+Keras-Preprocessing
+kiwisolver
+Markdown
+matplotlib
+numpy
+oauthlib
+opencv-python
+opt-einsum
+Pillow
+protobuf
+pyasn1
+pyasn1-modules
+pyparsing
+python-dateutil
+PyYAML
+requests
+requests-oauthlib
+rsa
+scikit-learn
+scipy
+six
+tensorboard
+tensorboard-plugin-wit
+tensorflow
+tensorflow-estimator
+termcolor
+threadpoolctl
+typing-extensions
+urllib3
+Werkzeug
+wrapt
diff --git a/script/get-dataset-imagenet-aux/_cm.json b/script/get-dataset-imagenet-aux/_cm.json
index 286af2583..14dc937ac 100644
--- a/script/get-dataset-imagenet-aux/_cm.json
+++ b/script/get-dataset-imagenet-aux/_cm.json
@@ -14,6 +14,22 @@
     "image-classification",
     "imagenet-aux"
   ],
+  "prehook_deps": [
+    {
+      "tags": "download-and-extract,_extract,_wget",
+      "env": {
+        "CM_EXTRACT_EXTRACTED_FILENAME": "<<>>",
+        "CM_EXTRACT_FINAL_ENV_NAME": "CM_DATASET_AUX_PATH",
+        "CM_DOWNLOAD_URL": "<<>>",
+        "CM_DOWNLOAD_URL1": "<<>>"
+      },
+      "update_tags_from_env_with_prefix": {
+        "_url.": [ "CM_PACKAGE_URL" ]
+      },
+      "force_cache": true,
+      "extra_cache_tags": "imagenet-aux,dataset-aux"
+    }
+  ],
   "uid": "bb2c6dd8c8c64217",
   "variations": {
     "2012": {
@@ -27,7 +43,9 @@
         "2012"
       ],
       "env": {
-        "CM_WGET_URL": "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
+        "CM_PACKAGE_URL": "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz",
+        "CM_PACKAGE_URL1": "https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz",
+        "CM_DOWNLOAD_CHECKSUM_": "f963098ea0e785a968ca1eb634003a90"
       }
     },
     "from.dropbox": {
@@ -37,7 +55,9 @@
         "2012"
       ],
       "env": {
-        "CM_WGET_URL": "https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz"
+        "CM_PACKAGE_URL": "https://www.dropbox.com/s/92n2fyej3lzy3s3/caffe_ilsvrc12.tar.gz",
+        "CM_PACKAGE_URL1": "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz",
+        "CM_DOWNLOAD_CHECKSUM_": "ee346d67141e476df9c1a3f813552503"
      }
    }
  }
diff --git a/script/get-dataset-imagenet-aux/run.sh b/script/get-dataset-imagenet-aux/run.sh
deleted file mode 100644
index a05e8538c..000000000
--- a/script/get-dataset-imagenet-aux/run.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-echo ""
-
-wget -4 -nc ${CM_WGET_URL} --no-check-certificate
-test $? -eq 0 || exit 1
-
-mkdir data
-
-tar -C data -xvzf caffe_ilsvrc12.tar.gz
-test $? -eq 0 || exit 1
-
-rm -rf caffe_ilsvrc12.tar.gz
-
-echo "CM_DATASET_AUX_PATH=$PWD/data" > tmp-run-env.out
diff --git a/script/get-generic-sys-util/_cm.json b/script/get-generic-sys-util/_cm.json
index 387e3dc68..ee734cc47 100644
--- a/script/get-generic-sys-util/_cm.json
+++ b/script/get-generic-sys-util/_cm.json
@@ -50,6 +50,17 @@
       }
     }
   },
+  "g++-11": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "g++11"
+    },
+    "state": {
+      "g++11": {
+        "apt": "g++-11",
+        "dnf": "gcc-toolset-11-gcc-c++"
+      }
+    }
+  },
   "g++-12": {
     "env": {
       "CM_SYS_UTIL_NAME": "g++12"
@@ -61,6 +72,37 @@
       }
     }
   },
+  "g++-9": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "g++9"
+    },
+    "state": {
+      "g++9": {
+        "apt": "g++-9",
+        "dnf": "gcc-toolset-9-gcc-c++"
+      }
+    }
+  },
+  "gcc-11": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "gcc11"
+    },
+    "state": {
+      "gcc11": {
+        "apt": "gcc-11"
+      }
+    }
+  },
+  "gcc-9": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "gcc9"
+    },
+    "state": {
+      "gcc9": {
+        "apt": "gcc-9"
+      }
+    }
+  },
   "gflags-dev": {
     "env": {
       "CM_SYS_UTIL_NAME": "gflags-dev"
@@ -136,6 +178,16 @@
       }
     }
   },
+  "libffi": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "libffi"
+    },
+    "state": {
+      "libffi7": {
+        "apt": "libffi"
+      }
+    }
+  },
   "libffi-dev": {
     "env": {
       "CM_SYS_UTIL_NAME": "libffi_dev"
@@ -159,6 +211,16 @@
       }
     }
   },
+  "libffi8": {
+    "env": {
+      "CM_SYS_UTIL_NAME": "libffi8"
+    },
+    "state": {
+      "libffi8": {
+        "apt": "libffi8"
+      }
+    }
+  },
   "libgdbm-dev": {
     "env": {
       "CM_SYS_UTIL_NAME": "libgdbm_dev"
diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py
index 27a4f8391..e1b6d8d36 100644
--- a/script/get-generic-sys-util/customize.py
+++ b/script/get-generic-sys-util/customize.py
@@ -24,7 +24,9 @@ def preprocess(i):
     if util == '':
         return {'return': 1, 'error': 'Please select a variation specifying the sys util name'}

+
     package = state.get(util)
+
     if not package:
         return {'return': 1, 'error': 'No package name specified for {} and util name {}'.format(pm, util)}

@@ -32,6 +34,13 @@ def preprocess(i):
     if not package_name:
         return {'return': 1, 'error': 'No package name specified for {} and util name {}'.format(pm, util)}

+    if util == "libffi":
+        if env.get("CM_HOST_OS_FLAVOR", "") == "ubuntu":
+            if env.get("CM_HOST_OS_VERSION", "") in [ "20.04", "20.10", "21.04", "21.10" ]:
+                package_name = "libffi7"
+            else:
+                package_name = "libffi8"
+
     # Temporary handling of dynamic state variables
     tmp_values = re.findall(r'<<<(.*?)>>>', str(package_name))
     for tmp_value in tmp_values:
diff --git a/script/get-git-repo/customize.py b/script/get-git-repo/customize.py
index d1e92f704..125308f33 100644
--- a/script/get-git-repo/customize.py
+++ b/script/get-git-repo/customize.py
@@ -35,7 +35,7 @@ def preprocess(i):
         git_clone_cmd = "git clone " + env['CM_GIT_RECURSE_SUBMODULES'] + git_checkout_string + " " + env['CM_GIT_URL'] + " " + env.get('CM_GIT_DEPTH','') + ' ' + env['CM_GIT_CHECKOUT_FOLDER']

     env['CM_GIT_CLONE_CMD'] = git_clone_cmd
-    env['CM_TMP_GIT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".git")
+    env['CM_TMP_GIT_PATH'] = os.path.join(os.getcwd(), env['CM_GIT_CHECKOUT_FOLDER'], ".gitdone")

     return {'return':0}

diff --git a/script/get-git-repo/run.sh b/script/get-git-repo/run.sh
index 87803526d..6cd8ef8fb 100644
--- a/script/get-git-repo/run.sh
+++ b/script/get-git-repo/run.sh
@@ -5,7 +5,7 @@ echo "$CUR_DIR"
 SCRIPT_DIR=${CM_TMP_CURRENT_SCRIPT_PATH}

 folder=${CM_GIT_CHECKOUT_FOLDER}
-if [ ! -d "${CM_TMP_GIT_PATH}" ]; then
-e "${CM_TMP_GIT_PATH}" ]; then rm -rf ${folder} echo "******************************************************" echo "Current directory: ${CUR_DIR}" diff --git a/script/get-ml-model-3d-unet-kits19/_cm.json b/script/get-ml-model-3d-unet-kits19/_cm.json index 13de73c7e..16ea76083 100644 --- a/script/get-ml-model-3d-unet-kits19/_cm.json +++ b/script/get-ml-model-3d-unet-kits19/_cm.json @@ -40,9 +40,22 @@ "group": "framework" }, "onnx,fp32": { + "deps":[ + { + "tags": "download,file,download-file,_wget", + "extra_cache_tags": "3d-unet,medical-imaging", + "force-cache": true, + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DOWNLOAD_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1", + "CM_DOWNLOAD_CHECKSUM":"82f0618fde78f9839e7c712274019b4a", + "CM_DOWNLOAD_FILENAME": "3dunet_kits19_128x128x128_dynbatch.onnx" + } + } + ], "env": { "CM_ML_MODEL_ACCURACY": "0.86170", - "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128_dynbatch.onnx?download=1" + "CM_ML_MODEL_FILE": "3dunet_kits19_128x128x128_dynbatch.onnx" } }, "pytorch": { @@ -52,17 +65,41 @@ "group": "framework" }, "pytorch,fp32": { + "deps":[ + { + "tags": "download,file,download-file,_wget", + "extra_cache_tags": "3d-unet,medical-imaging", + "force-cache": true, + "env": { + "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DOWNLOAD_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1", + "CM_DOWNLOAD_CHECKSUM":"2251109371f408c9f10a4320ffdcaef8", + "CM_DOWNLOAD_FILENAME": "3dunet_kits19_pytorch.ptc" + } + } + ], "env": { "CM_ML_MODEL_ACCURACY": "0.86170", - "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch.ptc?download=1" + "CM_ML_MODEL_FILE": "3dunet_kits19_pytorch.ptc" } }, "pytorch,fp32,weights": { + "deps":[ + { + "tags": "download-and-extract,_wget,_extract", + "extra_cache_tags": "3d-unet,medical-imaging", + "force-cache": true, + "env": { + "CM_DAE_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DAE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1", + "CM_DOWNLOAD_CHECKSUM":"09c696e3ec13d83c628498bcd831eb5b", + "CM_DOWNLOAD_FILENAME": "3dunet_kits19_pytorch_checkpoint.pth" + } + } + ], "env": { "CM_ML_MODEL_ACCURACY": "0.86170", - "CM_ML_MODEL_FILE": "3dunet_kits19_pytorch_checkpoint.pth", - "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_pytorch_checkpoint.pth?download=1", - "CM_UNZIP": "yes" + "CM_ML_MODEL_FILE": "3dunet_kits19_pytorch_checkpoint.pth" } }, "weights": { @@ -80,11 +117,22 @@ "alias": "tf" }, "tf,fp32": { + "deps":[ + { + "tags": "download-and-extract,_wget,_extract", + "extra_cache_tags": "3d-unet,medical-imaging", + "force-cache": true, + "env": { + "CM_DAE_FINAL_ENV_NAME": "CM_ML_MODEL_FILE_WITH_PATH", + "CM_DAE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1", + "CM_DOWNLOAD_CHECKSUM":"9497108bd0504ae8f85a764a807b76a9", + "CM_DOWNLOAD_FILENAME": "3dunet_kits19_128x128x128.tf.zip" + } + } + ], "env": { "CM_ML_MODEL_ACCURACY": "0.86170", - "CM_ML_MODEL_FILE": "3dunet_kits19_128x128x128.tf", - "CM_PACKAGE_URL": "https://zenodo.org/record/5597155/files/3dunet_kits19_128x128x128.tf.zip?download=1", - "CM_UNZIP": "yes" + "CM_ML_MODEL_FILE": "3dunet_kits19_128x128x128.tf" } } }, diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py index 65961f156..66c0a28ff 
--- a/script/get-ml-model-3d-unet-kits19/customize.py
+++ b/script/get-ml-model-3d-unet-kits19/customize.py
@@ -11,28 +11,14 @@ def preprocess(i):

     cm = automation.cmind

-    path = os.getcwd()
-
-    url = env['CM_PACKAGE_URL']
-
-    print ('Downloading from {}'.format(url))
-
-    r = cm.access({'action':'download_file',
-                   'automation':'utils,dc2743f8450541e3',
-                   'url':url})
-    if r['return']>0: return r
-
-    filename = r['filename']
-
-    if env.get('CM_UNZIP') == "yes":
-        os.system("unzip "+filename)
-        filename = env['CM_ML_MODEL_FILE']
-        env['CM_ML_MODEL_FILE_WITH_PATH']=os.path.join(path, filename)
+    path = os.path.dirname(env['CM_ML_MODEL_FILE_WITH_PATH'])
+
+    if env.get("CM_DAE_EXTRACT_DOWNLOADED", " ") != " ":
+        env['CM_ML_MODEL_PATH'] = os.path.join(path, env['CM_ML_MODEL_FILE'])
+        env['CM_ML_MODEL_FILE_WITH_PATH'] = env['CM_ML_MODEL_PATH']
     else:
-        # Add to path
-        env['CM_ML_MODEL_FILE']=filename
-        env['CM_ML_MODEL_FILE_WITH_PATH']=r['path']
-
-    env['CM_ML_MODEL_PATH']=path
+        env['CM_ML_MODEL_PATH'] = path
+
+    env['CM_GET_DEPENDENT_CACHED_PATH'] = env['CM_ML_MODEL_PATH']
+
     return {'return':0}
diff --git a/script/get-ml-model-bert-base-squad/_cm.json b/script/get-ml-model-bert-base-squad/_cm.json
index b893590b8..39bcd8f1f 100644
--- a/script/get-ml-model-bert-base-squad/_cm.json
+++ b/script/get-ml-model-bert-base-squad/_cm.json
@@ -62,7 +62,7 @@
       "deps": [
         {
           "names": [ "neural-magic-zoo-downloader" ],
-          "tags": "get,ml-model,zoo,deepsparse,_pruned95_obs_quant-none"
+          "tags": "get,ml-model,zoo,deepsparse,_bert-base-pruned95_obs_quant-none"
         }
       ],
       "env": {
@@ -87,6 +87,7 @@
     }
   },
   "print_env_at_the_end" : {
-    "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH": "Path to the BERT vocab file"
+    "CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH": "Path to the BERT vocab file",
+    "CM_ML_MODEL_FILE_WITH_PATH": "Path to the ML model"
   }
 }
diff --git a/script/get-ml-model-bert-large-squad/_cm.json b/script/get-ml-model-bert-large-squad/_cm.json
index 8616af705..399052b20 100644
--- a/script/get-ml-model-bert-large-squad/_cm.json
+++ b/script/get-ml-model-bert-large-squad/_cm.json
@@ -84,7 +84,8 @@
     },
     "onnx,fp32": {
       "env": {
-        "CM_ML_MODEL_F1": "90.874"
+        "CM_ML_MODEL_F1": "90.874",
+        "CM_DOWNLOAD_CHECKSUM": "819b25b19cd8e59080c10892689750ca"
       }
     },
     "onnx,fp32,zenodo": {
@@ -101,7 +102,8 @@
     "onnx,int8": {
       "env": {
         "CM_ML_MODEL_F1": "90.067",
-        "CM_PACKAGE_URL": "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx"
+        "CM_PACKAGE_URL": "https://zenodo.org/record/3750364/files/bert_large_v1_1_fake_quant.onnx",
+        "CM_DOWNLOAD_CHECKSUM": "45f88ffb2915362242703c85c38ec2d4"
       }
     },
     "onnx,int8,zenodo": {
@@ -145,7 +147,8 @@
     },
     "tf,fp32": {
       "env": {
-        "CM_ML_MODEL_F1": "90.874"
+        "CM_ML_MODEL_F1": "90.874",
+        "CM_DOWNLOAD_CHECKSUM": "dd72de12e8226f25f0128a1a864b97ad"
       }
     },
     "tf,fp32,zenodo": {
@@ -186,7 +189,8 @@
     },
     "pytorch,int8": {
       "env": {
-        "CM_ML_MODEL_F1": "90.633"
+        "CM_ML_MODEL_F1": "90.633",
+        "CM_DOWNLOAD_CHECKSUM": "0734c580cb53b4b56a3f400771ffcb7c"
       }
     },
     "pytorch,int8,zenodo": {
diff --git a/script/get-mlperf-inference-nvidia-common-code/_cm.json b/script/get-mlperf-inference-nvidia-common-code/_cm.json
index d1f25926d..5785f93f8 100644
--- a/script/get-mlperf-inference-nvidia-common-code/_cm.json
+++ b/script/get-mlperf-inference-nvidia-common-code/_cm.json
@@ -72,6 +72,13 @@
         "version": "v4.0"
       }
     }
+  },
+  "r4.1": {
+    "add_deps_recursive": {
+      "mlperf-inference-results": {
+        "version": "v4.1"
+      }
+    }
   }
  }
 }
diff --git a/script/get-mlperf-inference-results/_cm.json b/script/get-mlperf-inference-results/_cm.json
index cf6e10b1b..45d1c2f51 100644
--- a/script/get-mlperf-inference-results/_cm.json
+++ b/script/get-mlperf-inference-results/_cm.json
@@ -19,6 +19,9 @@
       "force_env_keys": [
         "CM_GIT_*"
       ],
+      "names": [
+        "inference-results-repo"
+      ],
       "env": {
         "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_INFERENCE_RESULTS_PATH"
       },
@@ -64,6 +67,17 @@
       "CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME": "v4.0",
       "CM_GIT_URL": "https://github.com/<<>>/inference_results_v4.0.git"
     }
+  },
+  "v4.1": {
+    "env": {
+      "CM_MLPERF_INFERENCE_RESULTS_VERSION_NAME": "v4.1",
+      "CM_GIT_URL": "https://github.com/<<>>/inference_results_v4.1.git"
+    },
+    "adr": {
+      "inference-results-repo": {
+        "tags": "_branch.cm-fixes"
+      }
+    }
   }
 },
 "variations": {
diff --git a/script/get-mlperf-inference-utils/mlperf_utils.py b/script/get-mlperf-inference-utils/mlperf_utils.py
index 4d960ef43..1da27dfae 100644
--- a/script/get-mlperf-inference-utils/mlperf_utils.py
+++ b/script/get-mlperf-inference-utils/mlperf_utils.py
@@ -133,7 +133,7 @@ def get_accuracy_metric(config, model, path):

     return is_valid, acc_results, acc_targets, acc_limits

-def get_result_string(version, model, scenario, result_path, has_power, sub_res, division="open", system_json=None):
+def get_result_string(version, model, scenario, result_path, has_power, sub_res, division="open", system_json=None, model_precision="fp32"):

     config = checker.Config(
         version,
@@ -219,6 +219,7 @@ def get_result_string(version, model, scenario, result_path, has_power, sub_res,

     result_string = f"\n\n## Results\n"
     result_string += f"\nPlatform: {sub_res}\n"
+    result_string += f"\nModel Precision: {model_precision}\n"
     result_string += "\n### Accuracy Results \n" + accuracy_result_string
     result_string += "\n### Performance Results \n" + performance_result_string
     if has_power:
diff --git a/script/get-tvm/_cm.json b/script/get-tvm/_cm.json
index dd81f21c4..1b221bf79 100644
--- a/script/get-tvm/_cm.json
+++ b/script/get-tvm/_cm.json
@@ -55,6 +55,7 @@
   "variations": {
     "pip-install": {
       "group": "installation-type",
+      "default": true,
       "deps": [
         {
           "tags": "get,generic-python-lib,_apache-tvm"
@@ -76,7 +77,6 @@
     },
     "llvm": {
       "group": "installation-type",
-      "default": true,
       "deps": [
         {
           "names": [ "llvm" ],
diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml
index 7c1a2651e..82466d011 100644
--- a/script/run-mlperf-inference-app/_cm.yaml
+++ b/script/run-mlperf-inference-app/_cm.yaml
@@ -126,6 +126,8 @@ deps:
     - inference-src
     tags: get,mlcommons,inference,src
   - tags: get,sut,description
+    skip_if_env:
+      CM_MLPERF_USE_DOCKER: [ on ]

   - tags: get,mlperf,inference,results,dir
     names:
@@ -280,6 +282,13 @@ variations:
     env:
       CM_MLPERF_INFERENCE_VERSION: '4.1'
       CM_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r4.1_default
+    adr:
+      get-mlperf-inference-results-dir:
+        version: "r4.1"
+      get-mlperf-inference-submission-dir:
+        version: "r4.1"
+      mlperf-inference-nvidia-scratch-space:
+        version: "r4.1"
     group: benchmark-version

   short:
diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py
index 829e931f9..7f566cf84 100644
--- a/script/run-mlperf-inference-app/customize.py
+++ b/script/run-mlperf-inference-app/customize.py
@@ -23,8 +23,10 @@ def preprocess(i):
         return {'return':0}

     dump_version_info = env.get('CM_DUMP_VERSION_INFO', True)
-    system_meta = state['CM_SUT_META']
-    env['CM_SUT_META_EXISTS'] = "yes"
+
+    system_meta = state.get('CM_SUT_META', {})
+    if system_meta:
+        env['CM_SUT_META_EXISTS'] = "yes"

     env['CM_MODEL'] = env['CM_MLPERF_MODEL']
@@ -182,7 +184,8 @@ def preprocess(i):
             state = {}
             docker_extra_input = {}

-            del(env['CM_HW_NAME'])
+            if env.get('CM_HW_NAME'):
+                del(env['CM_HW_NAME'])

             for k in inp:
                 if k.startswith("docker_"):
diff --git a/script/runtime-system-infos/customize.py b/script/runtime-system-infos/customize.py
index 40b837450..4caee358a 100644
--- a/script/runtime-system-infos/customize.py
+++ b/script/runtime-system-infos/customize.py
@@ -73,7 +73,6 @@ def preprocess(i):

             # Write data as a row to CSV file
             writer.writerow(data)
-            print("raw written")  # To be removed. Currently present for debugging purpose.
             time.sleep(interval)

     f.close()