diff --git a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
index 294356e59..0da19d06d 100644
--- a/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml
@@ -5,7 +5,7 @@ name: MLPerf inference MLCommons C++ ResNet50
 
 on:
   pull_request:
-    branches: [ "main", "dev" ]
+    branches: [ "main", "dev", "mlperf-inference" ]
     paths:
       - '.github/workflows/test-mlperf-inference-mlcommons-cpp-resnet50.yml'
       - '**'
diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index e8b759e37..3285224d4 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -52,11 +52,16 @@ def preprocess(i):
         if not docker_image_base:
             return {'return': 1, 'error': f"Version \"{env['CM_DOCKER_OS_VERSION']}\" is not supported yet for \"{env['CM_DOCKER_OS']}\" "}
 
-    if "CM_MLOPS_REPO" in env:
+    if env.get("CM_MLOPS_REPO", "") != "":
         cm_mlops_repo = env["CM_MLOPS_REPO"]
     else:
         cm_mlops_repo = "mlcommons@ck"
 
+    if env.get("CM_MLOPS_REPO_BRANCH", '') != '':
+        cm_mlops_repo_branch_string = f" --branch {env['CM_MLOPS_REPO_BRANCH']}"
+    else:
+        cm_mlops_repo_branch_string = ""
+
     if 'CM_DOCKERFILE_WITH_PATH' not in env:
         env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(os.getcwd(), "Dockerfile")
 
diff --git a/script/build-mlperf-inference-server-nvidia/_cm.yaml b/script/build-mlperf-inference-server-nvidia/_cm.yaml
index 6dd5ebe8e..bb6f60a68 100644
--- a/script/build-mlperf-inference-server-nvidia/_cm.yaml
+++ b/script/build-mlperf-inference-server-nvidia/_cm.yaml
@@ -78,7 +78,7 @@ deps:
 
   # Detect CMake
   - tags: get,cmake
-    version_min: "3.25"
+    version: "3.25.1"
 
   # Detect Google Logger
   - tags: get,generic,sys-util,_glog-dev
@@ -203,7 +203,6 @@ versions:
       nvidia-scratch-space:
         tags: _version.4_0
     deps:
-      - tags: install,nccl,libs,_cuda
       - tags: install,pytorch,from.src,_for-nvidia-mlperf-inference-v3.1
         names:
         - pytorch
@@ -212,6 +211,7 @@
         names:
         - pytorchvision
         - torchvision
+      - tags: install,nccl,libs,_cuda
 
 docker:
   skip_run_cmd: 'no'
@@ -219,6 +219,7 @@ docker:
   shm_size: '32gb'
   extra_run_args: ' --runtime=nvidia --ulimit memlock=-1 --cap-add SYS_ADMIN --cap-add SYS_TIME --security-opt apparmor=unconfined --security-opt seccomp=unconfined'
   docker_os: ubuntu
+  cm_repo_flags1: ' --branch=mlperf-inference'
   docker_real_run: False
   interactive: True
   docker_os_version: '20.04'
diff --git a/script/get-ml-model-rnnt/_cm.json b/script/get-ml-model-rnnt/_cm.json
index ea149ea02..5754713c0 100644
--- a/script/get-ml-model-rnnt/_cm.json
+++ b/script/get-ml-model-rnnt/_cm.json
@@ -52,7 +52,6 @@
     },
     "pytorch,fp32,amazon-s3": {
       "env": {
-        "CM_PACKAGE_URL": "https://mlperf-public.s3.us-west-2.amazonaws.com/DistributedDataParallel_1576581068.9962234-epoch-100.pt"
       }
     },
     "pytorch,fp32,zenodo": {
@@ -61,11 +60,11 @@
       }
     },
     "zenodo": {
-      "group": "download-src"
-    },
-    "amazon-s3": {
       "group": "download-src",
       "default": true
+    },
+    "amazon-s3": {
+      "group": "download-src"
     }
   },
   "print_env_at_the_end" : {
diff --git a/script/install-pytorch-from-src/_cm.json b/script/install-pytorch-from-src/_cm.json
index 75f15a501..057dbe681 100644
--- a/script/install-pytorch-from-src/_cm.json
+++ b/script/install-pytorch-from-src/_cm.json
@@ -93,7 +93,7 @@
         "CM_GIT_CHECKOUT_TAG": "#"
      },
       "ad": {
-        "pytorch-src-repo": { 
+        "pytorch-src-repo": {
           "tags": "_no-recurse-submodules,_full-history"
         }
       }
     }
@@ -230,7 +230,12 @@
           "tags": "get,cmake",
           "version_min": "3.25.0"
         }
-      ]
+      ],
+      "ad": {
+        "pytorch-src-repo": {
+          "tags": "_no-recurse-submodules,_full-history"
+        }
+      }
     },
     "cuda": {
       "deps": [