diff --git a/.github/workflows/check-broken-links.md b/.github/workflows/check-broken-links.yml
similarity index 74%
rename from .github/workflows/check-broken-links.md
rename to .github/workflows/check-broken-links.yml
index a753ec75b..af257ebf4 100644
--- a/.github/workflows/check-broken-links.md
+++ b/.github/workflows/check-broken-links.yml
@@ -1,13 +1,16 @@
-name: Check .md README files for broken links
+name: "Check .md README files for broken links"
 
-on: [pull_request]
+on:
+  push:
+    branches:
+      - master
 
 jobs:
   markdown-link-check:
     runs-on: ubuntu-latest
     # check out the latest version of the code
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
 
     # Checks the status of hyperlinks in .md files in verbose mode
     - name: Check links
diff --git a/.github/workflows/code-review.yml b/.github/workflows/code-review.yml
index 258b305f3..3714d8a2a 100644
--- a/.github/workflows/code-review.yml
+++ b/.github/workflows/code-review.yml
@@ -2,7 +2,7 @@ name: OpenAI Code Review
 
 on:
   pull_request_target:
-    types: [opened, synchronize]
+    types: [opened]
     paths:
       - 'automation/**'
      - 'script/**'
@@ -15,7 +15,7 @@ permissions:
 jobs:
   code_review:
     runs-on: ubuntu-latest
-    if: github.repository_owner == 'gateoverflow' && github.event.pull_request.changed_files > 0
+    if: github.repository_owner == 'gateoverflow_off' && github.event.pull_request.changed_files > 0
     steps:
      # Run code review via OpenAI
      # Step to run the OpenAI Code Review using the GATEOverflow action
diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml
index 5a7ecc7e8..1c59dea46 100644
--- a/.github/workflows/test-mlperf-inference-gptj.yml
+++ b/.github/workflows/test-mlperf-inference-gptj.yml
@@ -5,12 +5,12 @@ name: MLPerf inference GPT-J
 
 on:
   schedule:
-    - cron: "1 1 * * */3"
+    - cron: "1 2 * * *"
 
 jobs:
   build:
     if: github.repository_owner == 'gateoverflow'
-    runs-on: [ self-hosted, linux, x64 ]
+    runs-on: [ self-hosted, linux, x64, GO-spr ]
     strategy:
       fail-fast: false
       matrix:
@@ -24,7 +24,10 @@ jobs:
         source gh_action/bin/deactivate || python3 -m venv gh_action
         source gh_action/bin/activate
         export CM_REPOS=$HOME/GH_CM
-        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
+        python3 -m pip install cm4mlops
+        cm pull repo
     - name: Test MLPerf Inference GPTJ
       run: |
         cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --beam_size=1 --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
+        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_test_submissions_v5.0 --repo_branch=main --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/gh_action_submissions
+
diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml
new file mode 100644
index 000000000..97bd1bc6f
--- /dev/null
+++ b/.github/workflows/test-mlperf-inference-llama2.yml
@@ -0,0 +1,33 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: MLPerf inference LLAMA 2 70B
+
+on:
+  schedule:
+    - cron: "30 19 * * 4"
+
+jobs:
+  build_reference:
+    if: github.repository_owner == 'gateoverflow'
+    runs-on: [ self-hosted, GO-i9, linux, x64 ]
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ "3.12" ]
+        backend: [ "pytorch" ]
+        device: [ "cpu" ]
+
+    steps:
+    - name: Install dependencies
+      run: |
+        source gh_action/bin/deactivate || python3 -m venv gh_action
+        source gh_action/bin/activate
+        export CM_REPOS=$HOME/GH_CM
+        python3 -m pip install cm4mlops
+        cm pull repo
+        python3 -m pip install "huggingface_hub[cli]"
+        huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential
+    - name: Test MLPerf Inference LLAMA 2 70B reference implementation
+      run: |
+        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --adr.inference-src.tags=_repo.https://github.com/anandhu-eng/inference.git --clean
diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml
index acbe88b90..5867abbf6 100644
--- a/.github/workflows/test-mlperf-inference-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-resnet50.yml
@@ -4,7 +4,7 @@ name: MLPerf inference ResNet50
 
 on:
-  pull_request:
+  pull_request_target:
     branches: [ "main", "dev", "mlperf-inference" ]
     paths:
       - '.github/workflows/test-mlperf-inference-resnet50.yml'
@@ -28,9 +28,7 @@ jobs:
          - os: macos-latest
            backend: tf
          - os: windows-latest
-# MLPerf requires interaction when installing LLVM on Windows - that's why we excluded it here
-
-
+            implementation: cpp
     steps:
     - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
@@ -41,6 +39,26 @@ jobs:
       run: |
         python3 -m pip install cmind
         cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
-    - name: Test MLPerf Inference ResNet50
+    - name: Test MLPerf Inference ResNet50 (Windows)
+      if: matrix.os == 'windows-latest'
       run: |
-        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="cTuning" --hw_name=default --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_windows --model=resnet50 --adr.loadgen.tags=_from-pip --pip_loadgen=yes --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+    - name: Test MLPerf Inference ResNet50 (Linux/macOS)
+      if: matrix.os != 'windows-latest'
+      run: |
+        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --hw_name=gh_${{ matrix.os }}_x86 --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+    - name: Push Results
+      if: github.repository_owner == 'gateoverflow'
+      env:
+        USER: "GitHub Action"
+        EMAIL: "admin@gateoverflow.com"
+      run: |
+        git config --global user.name "$USER"
+        git config --global user.email "$EMAIL"
+        git config --global credential.https://github.com.helper ""
+        git config --global credential.https://github.com.helper "!gh auth git-credential"
+        git config --global credential.https://gist.github.com.helper ""
+        git config --global credential.https://gist.github.com.helper "!gh auth git-credential"
+
+        cm run script --tags=auth,gh,cli --with_token="${{ secrets.TEST_RESULTS_GITHUB_TOKEN }}"
+        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/mlperf_inference_test_submissions_v5.0 --repo_branch=main --commit_message="Results from R50 GH action" --quiet
diff --git a/.github/workflows/test-mlperf-inference-sdxl.yaml b/.github/workflows/test-mlperf-inference-sdxl.yaml
index c7d693495..fd452c3ba 100644
--- a/.github/workflows/test-mlperf-inference-sdxl.yaml
+++ b/.github/workflows/test-mlperf-inference-sdxl.yaml
@@ -1,12 +1,12 @@
 name: MLPerf inference SDXL
-
+#off now as we have SCC24 test doing the same
 on:
   schedule:
     - cron: "1 2 * * *"
 
 jobs:
   build_reference:
-    if: github.repository_owner == 'gateoverflow'
+    if: github.repository_owner == 'gateoverflow_off'
     runs-on: [ self-hosted, linux, x64 ]
     strategy:
       fail-fast: false
@@ -15,18 +15,17 @@ jobs:
         backend: [ "pytorch" ]
         precision: [ "float16" ]
     steps:
-    - name: Install dependencies
+    - name: Test MLPerf Inference SDXL Reference
      run: |
        source gh_action/bin/deactivate || python3 -m venv gh_action
        source gh_action/bin/activate
        export CM_REPOS=$HOME/GH_CM
-        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
-    - name: Test MLPerf Inference SDXL
-      run: |
+        python3 -m pip install cm4mlops
+        cm pull repo
         cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=sdxl --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
 
   build_nvidia:
-    if: github.repository_owner == 'gateoverflow'
+    if: github.repository_owner == 'gateoverflow_off'
     runs-on: [ self-hosted, linux, x64 ]
     strategy:
       fail-fast: false
@@ -36,12 +35,10 @@ jobs:
         precision: [ "float16" ]
         implementation: [ "nvidia" ]
     steps:
-    - name: Install dependencies
+    - name: Test MLPerf Inference SDXL Nvidia
      run: |
        source gh_action/bin/deactivate || python3 -m venv gh_action
        source gh_action/bin/activate
        export CM_REPOS=$HOME/GH_CM
-        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
-    - name: Test MLPerf Inference SDXL
-      run: |
+        cm pull repo
         cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=sdxl --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --clean
diff --git a/.github/workflows/test-scc24-sdxl.yaml b/.github/workflows/test-scc24-sdxl.yaml
index e9a2fa410..680d0f5f4 100644
--- a/.github/workflows/test-scc24-sdxl.yaml
+++ b/.github/workflows/test-scc24-sdxl.yaml
@@ -1,13 +1,15 @@
-name: MLPerf inference SDXL
+name: MLPerf inference SDXL (SCC)
 
 on:
   schedule:
-    - cron: "43 1 * * *"
+    - cron: "1 3 * * *"
 
 jobs:
   build_reference:
     if: github.repository_owner == 'gateoverflow'
-    runs-on: [ self-hosted, linux, x64 ]
+    runs-on: [ self-hosted, linux, x64, GO-spr ]
+    env:
+      CM_REPOS: $HOME/GH_CM
     strategy:
       fail-fast: false
       matrix:
@@ -16,23 +18,23 @@ jobs:
         precision: [ "float16" ]
         device: [ "cuda" ]
     steps:
-    - name: Install dependencies
+    - name: Test MLPerf Inference reference SDXL SCC
      run: |
-        source gh_action/bin/deactivate || python3 -m venv gh_action
+        if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
+        python3 -m venv gh_action
        source gh_action/bin/activate
        export CM_REPOS=$HOME/GH_CM
-        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
-    - name: Test MLPerf Inference reference SDXL SCC
-      env:
-        GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
-      run: |
-        cm run script --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --quiet --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --precision=float16 --clean |
-        cm run script --tags=generate,inference,submission --clean --preprocess_submission=yes --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons |
-        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet
+        pip install --upgrade cm4mlops
+        pip install tabulate
+        cm pull repo
+        cm run script --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --precision=float16 --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        cm run script --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --precision=float16 --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        cm run script --tags=generate,inference,submission --clean --preprocess_submission=yes --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
 
   build_nvidia:
     if: github.repository_owner == 'gateoverflow'
-    runs-on: [ self-hosted, linux, x64 ]
+    runs-on: [ self-hosted, linux, x64, GO-spr]
     strategy:
       fail-fast: false
       matrix:
@@ -41,16 +43,16 @@ jobs:
         precision: [ "float16" ]
         implementation: [ "nvidia" ]
     steps:
-    - name: Install dependencies
+    - name: Test MLPerf Inference NVIDIA SDXL SCC
      run: |
-        source gh_action/bin/deactivate || python3 -m venv gh_action
+        if [ -f "gh_action/bin/deactivate" ]; then source gh_action/bin/deactivate; fi
+        python3 -m venv gh_action
        source gh_action/bin/activate
        export CM_REPOS=$HOME/GH_CM
-        cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
-    - name: Test MLPerf Inference NVIDIA SDXL SCC
-      env:
-        GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
-      run: |
-        cm run script --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --docker_dt=yes --quiet --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --precision=float16 --clean |
-        cm run script --tags=generate,inference,submission --clean --preprocess_submission=yes --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --env.CM_DETERMINE_MEMORY_CONFIGURATION=yes --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons |
-        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet
+        pip install --upgrade cm4mlops
+        pip install tabulate
+        cm pull repo
+        cm run script --tags=run-mlperf,inference,_find-performance,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --precision=float16 --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --hw_name=go-spr --clean
+        cm run script --tags=run-mlperf,inference,_r4.1-dev,_short,_scc24-base --model=sdxl --implementation=nvidia --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --precision=${{ matrix.precision }} --docker --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --docker_dt=yes --quiet --results_dir=$HOME/scc_gh_action_results --submission_dir=$HOME/scc_gh_action_submissions --precision=float16 --env.CM_MLPERF_MODEL_SDXL_DOWNLOAD_TO_HOST=yes --clean
+        cm run script --tags=generate,inference,submission --clean --preprocess_submission=yes --run-checker --tar=yes --env.CM_TAR_OUTFILE=submission.tar.gz --division=open --category=datacenter --run_style=test --adr.submission-checker.tags=_short-run --quiet --submitter=MLCommons --submission_dir=$HOME/scc_gh_action_submissions --results_dir=$HOME/scc_gh_action_results/test_results
+        cm run script --tags=push,github,mlperf,inference,submission --repo_url=https://github.com/gateoverflow/cm4mlperf-inference --repo_branch=mlperf-inference-results-scc24 --commit_message="Results from self hosted Github actions - NVIDIARTX4090" --quiet --submission_dir=$HOME/scc_gh_action_submissions
diff --git a/README.md b/README.md
index 06c2cd2bc..deff5458c 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,5 @@
 ## Unified and cross-platform CM interface for DevOps, MLOps and MLPerf
 
-[![arXiv](https://img.shields.io/badge/arXiv-2406.16791-b31b1b.svg)](https://arxiv.org/abs/2406.16791)
 [![License](https://img.shields.io/badge/License-Apache%202.0-green)](LICENSE.md)
 [![Python Version](https://img.shields.io/badge/python-3+-blue.svg)](https://github.com/mlcommons/ck/tree/master/cm/cmind)
 [![Powered by CM](https://img.shields.io/badge/Powered_by-MLCommons%20CM-blue)](https://github.com/mlcommons/ck).
@@ -141,8 +140,7 @@ cm run script \
 
 ## CM concepts
 
-* https://doi.org/10.5281/zenodo.8105339
-* https://arxiv.org/abs/2406.16791
+Check our [ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339).
 
 ## Authors
diff --git a/script/app-image-corner-detection/_cm.json b/script/app-image-corner-detection/_cm.json
deleted file mode 100644
index 405654f5e..000000000
--- a/script/app-image-corner-detection/_cm.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-  "alias": "app-image-corner-detection",
-  "automation_alias": "script",
-  "automation_uid": "5b4e0237da074764",
-  "category": "Modular application pipeline",
-  "deps": [
-    {"tags":"detect,os"},
-    {"tags":"detect,cpu"}
-  ],
-  "posthook_deps": [
-    {
-      "skip_if_env": {
-        "CM_SKIP_COMPILE": [
-          "on"
-        ]
-      },
-      "tags": "compile,cpp-program"
-    },
-    {
-      "skip_if_env": {
-        "CM_SKIP_RUN": [
-          "on"
-        ]
-      },
-      "tags": "benchmark-program"
-    }
-  ],
-  "tags": [
-    "app",
-    "image",
-    "corner-detection"
-  ],
-  "uid": "998ffee0bc534d0a"
-}
diff --git a/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json b/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json
index 1d00f3c81..03e91596a 100644
--- a/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json
+++ b/script/app-mlperf-inference-ctuning-cpp-tflite/_cm.json
@@ -120,7 +120,7 @@
     {
       "names": [
         "tensorflow",
-        "tflite"
+        "tflite"
       ],
       "tags": "get,tensorflow,lib,_tflite"
     },
diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index df7a5a1d7..b648dc54e 100644
--- a/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -295,7 +295,6 @@ deps:
     enable_if_env:
       CM_MLPERF_BACKEND:
         - tf
-        - tflite
 
   ## NCNN
   - tags: get,generic-python-lib,_package.ncnn
@@ -305,15 +304,12 @@ deps:
       CM_MLPERF_BACKEND:
         - ncnn
 
-  # - tags: get,generic-python-lib
-  #   names:
-  #   - ml-engine-tflite
-  #   enable_if_env:
-  #     CM_MLPERF_BACKEND:
-  #     - tflite
-  #     CM_MLPERF_DEVICE:
-  #     - tpu
-
+  - tags: get,tensorflow,lib,_tflite
+    names:
+    - ml-engine-tflite
+    enable_if_env:
+      CM_MLPERF_BACKEND:
+      - tflite
 
 
 ########################################################################
@@ -424,6 +420,8 @@ deps:
         - "on"
       CM_MLPERF_INFERENCE_API_SERVER:
         - "on"
+      CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+        - 'yes'
 
   ## mixtral-8x7b
   - tags: get,ml-model,mixtral
@@ -566,6 +564,9 @@ deps:
       CM_MODEL:
        - 3d-unet-99
        - 3d-unet-99.9
+    skip_if_env:
+      CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
+        - 'yes'
 
   ## Librispeech for rnnt
   - tags: get,dataset,librispeech,preprocessed
@@ -701,11 +702,6 @@ variations:
       torchvision:
         tags: _rocm
 
-  rocm,sdxl:
-    add_deps:
-      mlperf-implementation:
-        tags: _repo.https://github.com/gateoverflow/inference
-
   ray:
     group: framework
     add_deps_recursive:
@@ -956,6 +952,7 @@ variations:
  llama2-70b_:
    env:
      CM_MLPERF_MODEL_SKIP_BATCHING: false
+      CM_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51"
    deps:
      - tags: get,generic-python-lib,_package.transformers
        names:
@@ -981,6 +978,8 @@ variations:
      - tags: get,generic-python-lib,_package.nltk
        names:
        - nltk
+        version_max: 3.8.1
+        version_max_usable: 3.8.1
      - tags: get,generic-python-lib,_package.numpy
        names:
        - numpy
@@ -1075,6 +1074,10 @@ variations:
      CM_MLPERF_MODEL_SKIP_BATCHING: true
    deps:
      - tags: get,generic-python-lib,_package.nibabel
+      - tags: get,generic-python-lib,_package.scipy
+        names:
+        - scipy
+        version: 1.10.1
 
  dlrm-v2-99.9:
    group: models
@@ -1106,7 +1109,6 @@ variations:
        - torch
        - pytorch
        - ml-engine-pytorch
-        version: "1.13.1"
      - tags: get,generic-python-lib,_mlperf_logging
      - tags: get,generic-python-lib,_opencv-python
      - tags: get,generic-python-lib,_tensorboard
@@ -1120,15 +1122,11 @@ variations:
      - tags: get,generic-python-lib,_package.pyre-extensions
      - tags: get,generic-python-lib,_package.torchsnapshot
      - tags: get,generic-python-lib,_package.torchmetrics
-        version: "0.11.0"
      - tags: get,generic-python-lib,_package.torchrec
-        version: "0.3.2"
      - tags: get,generic-python-lib,_package.fbgemm-gpu
-        version: "0.3.2"
      - tags: get,generic-python-lib,_package.fbgemm-gpu-cpu
-        version: "0.3.2"
-      - tags: get,generic-python-lib,_package.torch
-        version: "1.13.1"
+      - tags: get,generic-python-lib,_package.fvcore
+      - tags: set,user,limit,_large-nofile
 
 
  rnnt:
diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py
index 9d0a64955..ca97bbf0d 100644
--- a/script/app-mlperf-inference-mlcommons-python/customize.py
+++ b/script/app-mlperf-inference-mlcommons-python/customize.py
@@ -75,7 +75,7 @@ def preprocess(i):
        else:
            env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --mlperf_conf "+ x + env['CM_MLPERF_CONF'] + x
 
-    if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get('CM_MLPERF_INFERENCE_API_SERVER','')=='':
+    if env.get('CM_NETWORK_LOADGEN', '') != "lon" and env.get('CM_MLPERF_INFERENCE_API_SERVER','')=='' and "llama2-70b" not in env['CM_MODEL']:
        env['MODEL_DIR'] = env.get('CM_ML_MODEL_PATH')
        if not env['MODEL_DIR']:
            env['MODEL_DIR'] = os.path.dirname(env.get('CM_MLPERF_CUSTOM_MODEL_PATH', env.get('CM_ML_MODEL_FILE_WITH_PATH')))
@@ -318,7 +318,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio
            #env['CM_MLPERF_INFERENCE_API_SERVER'] = "http://localhost:8000"
            cmd += f" --api-server {env['CM_MLPERF_INFERENCE_API_SERVER']} --model-path {env['CM_VLLM_SERVER_MODEL_NAME']} --api-model-name {env['CM_VLLM_SERVER_MODEL_NAME']} --vllm "
        else:
-            cmd += f" --model-path {env['MODEL_DIR']}"
+            cmd += f" --model-path {env['LLAMA2_CHECKPOINT_PATH']}"
        if env.get('CM_MLPERF_INFERENCE_NUM_WORKERS', '') != '':
            cmd += f" --num-workers {env['CM_MLPERF_INFERENCE_NUM_WORKERS']}"
 
@@ -348,7 +348,7 @@ def get_run_cmd_reference(os_info, env, scenario_extra_options, mode_extra_optio
    cmd = env['CM_PYTHON_BIN_WITH_PATH']+ " run.py --backend=" + backend + " --scenario="+env['CM_MLPERF_LOADGEN_SCENARIO'] + \
        env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] + \
        " --model="+env['CM_ML_MODEL_FILE_WITH_PATH'] + \
-        " --preprocessed_data_dir="+env['CM_DATASET_PREPROCESSED_PATH'] + \
+        " --preprocessed_data_dir="+env['CM_DATASET_KITS19_PREPROCESSED_PATH'] + \
        scenario_extra_options + mode_extra_options + dataset_options
 
    env['LOG_PATH'] = env['CM_MLPERF_OUTPUT_DIR']
diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py
index 917102701..f7c116b14 100644
--- a/script/app-mlperf-inference-nvidia/customize.py
+++ b/script/app-mlperf-inference-nvidia/customize.py
@@ -232,6 +232,7 @@ def preprocess(i):
            for folder in folders:
                onnx_model_path = os.path.join(env['MLPERF_SCRATCH_PATH'], 'models', 'SDXL', 'onnx_models', folder, 'model.onnx')
                if not os.path.exists(onnx_model_path):
+                    env['CM_REQUIRE_SDXL_MODEL_DOWNLOAD'] = 'yes'
                    cmds.append(f"make download_model BENCHMARKS='{model_name}'")
                    break
        else:
diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index c51b5b515..2b8186c88 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -657,6 +657,14 @@ variations:
        - 3d-unet-accuracy-script
        tags: run,accuracy,mlperf,_kits19,_int8
 
+  3d-unet_,reference:
+    docker:
+      deps:
+        - enable_if_env:
+            CM_MLPERF_DATASET_3DUNET_DOWNLOAD_TO_HOST:
+              - 'yes'
+          tags: get,dataset,kits19,preprocessed
+
 
  sdxl:
    group: model
@@ -754,6 +762,14 @@ variations:
      add_deps_recursive:
        mlperf-inference-implementation:
          tags: _llama2-70b-99.9
+
+  llama2-70b_,reference:
+    docker:
+      deps:
+        - enable_if_env:
+            CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST:
+              - 'yes'
+          tags: get,ml-model,llama2
 
  mixtral-8x7b:
    group:
@@ -1588,6 +1604,7 @@ docker:
    - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2"
    - "${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ CM_NVIDIA_LLAMA_DATASET_FILE_PATH }}"
    - "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}"
+    - "${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}:${{ CM_DATASET_KITS19_PREPROCESSED_PATH }}"
  skip_run_cmd: 'no'
  shm_size: '32gb'
  interactive: True
diff --git a/script/build-dockerfile/_cm.yaml b/script/build-dockerfile/_cm.yaml
index 5436c6ddd..da00f24de 100644
--- a/script/build-dockerfile/_cm.yaml
+++ b/script/build-dockerfile/_cm.yaml
@@ -18,6 +18,7 @@ default_env:
    '
 
  CM_DOCKER_OS: ubuntu
+  CM_DOCKER_NOT_PULL_UPDATE: False
 
input_mapping:
  build: CM_BUILD_DOCKER_IMAGE
@@ -48,6 +49,7 @@ input_mapping:
  script_tags: CM_DOCKER_RUN_SCRIPT_TAGS
  skip_cm_sys_upgrade: CM_DOCKER_SKIP_CM_SYS_UPGRADE
  push_image: CM_DOCKER_PUSH_IMAGE
+  docker_not_pull_update: CM_DOCKER_NOT_PULL_UPDATE
 
new_env_keys:
- CM_DOCKERFILE_*
diff --git a/script/build-dockerfile/customize.py b/script/build-dockerfile/customize.py
index d99e24392..896454fb9 100644
--- a/script/build-dockerfile/customize.py
+++ b/script/build-dockerfile/customize.py
@@ -2,6 +2,7 @@ import cmind as cm
 import os
 import json
+import re
 
 
 def preprocess(i):
@@ -54,6 +55,17 @@ def preprocess(i):
 
    if env.get("CM_MLOPS_REPO", "") != "":
        cm_mlops_repo = env["CM_MLOPS_REPO"]
+        # the below pattern matches both the HTTPS and SSH git link formats
+        git_link_pattern = r'^(https?://github\.com/([^/]+)/([^/]+)\.git|git@github\.com:([^/]+)/([^/]+)\.git)$'
+        if match := re.match(git_link_pattern, cm_mlops_repo):
+            if match.group(2) and match.group(3):
+                repo_owner = match.group(2)
+                repo_name = match.group(3)
+            elif match.group(4) and match.group(5):
+                repo_owner = match.group(4)
+                repo_name = match.group(5)
+            cm_mlops_repo = f"{repo_owner}@{repo_name}"
+            print(f"Converted repo format from {env['CM_MLOPS_REPO']} to {cm_mlops_repo}")
    else:
        cm_mlops_repo = "mlcommons@cm4mlops"
 
@@ -221,12 +233,19 @@ def preprocess(i):
    skip_extra = False
    if 'CM_DOCKER_RUN_CMD' not in env:
+        env['CM_DOCKER_RUN_CMD']=""
        if 'CM_DOCKER_RUN_SCRIPT_TAGS' not in env:
-            env['CM_DOCKER_RUN_CMD']="cm version"
+            env['CM_DOCKER_RUN_CMD']+="cm version"
            skip_extra = True
        else:
-            env['CM_DOCKER_RUN_CMD']="cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS']+ ' --quiet'
-
+            if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", "true"]:
+                env['CM_DOCKER_RUN_CMD'] += "cm pull repo && "
+            env['CM_DOCKER_RUN_CMD'] += "cm run script --tags=" + env['CM_DOCKER_RUN_SCRIPT_TAGS']+ ' --quiet'
+    else:
+        if str(env.get('CM_DOCKER_NOT_PULL_UPDATE', 'False')).lower() not in ["yes", "1", "true"]:
+            env['CM_DOCKER_RUN_CMD']="cm pull repo && " + env['CM_DOCKER_RUN_CMD']
+
+    print(env['CM_DOCKER_RUN_CMD'])
 
    fake_run = env.get("CM_DOCKER_FAKE_RUN_OPTION"," --fake_run") + dockerfile_env_input_string
    fake_run = fake_run + " --fake_deps" if env.get('CM_DOCKER_FAKE_DEPS') else fake_run
diff --git a/script/detect-sudo/customize.py b/script/detect-sudo/customize.py
index 2f6fa411a..dbc9b8970 100644
--- a/script/detect-sudo/customize.py
+++ b/script/detect-sudo/customize.py
@@ -1,5 +1,8 @@
 from cmind import utils
 import os, subprocess
+import select
+import sys
+import grp
 
 def preprocess(i):
 
@@ -15,15 +18,103 @@ def preprocess(i):
    if prompt_sudo() == 0:
        env['CM_SUDO_USER'] = "yes"
 
+    if os.geteuid() == 0:
+        env['CM_SUDO'] = '' #root user does not need sudo
+    else:
+        if can_execute_sudo_without_password():
+            env['CM_SUDO_USER'] = "yes"
+            env['CM_SUDO'] = 'sudo'
+        else:
+            env['CM_SUDO_USER'] = "no"
+            env['CM_SUDO'] = ''
 
    return {'return':0}
 
+def can_execute_sudo_without_password():
+    try:
+        # Run a harmless command using sudo
+        result = subprocess.run(
+            ['sudo', '-n', 'true'],  # -n prevents sudo from prompting for a password
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+
+        # Check the return code; if it's 0, sudo executed without needing a password
+        if result.returncode == 0:
+            return True
+        else:
+            return False
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return False
+
+
+def reset_terminal():
+    """Reset terminal to default settings."""
+    subprocess.run(['stty', 'sane'])
+
+def prompt_retry(timeout=10, default_retry=False):
+    """Prompt the user with a yes/no question to retry the command, with a 10-second timeout."""
+
+    # Check if we're in an interactive terminal
+    if not sys.stdin.isatty():
+        if default_retry:
+            print(f"Non-interactive environment detected. Automatically retrying.")
+        else:
+            print(f"Non-interactive environment detected. Skipping retry.")
+        return default_retry  # Automatically use the default in non-interactive terminals
+
+    print(f"Timeout occurred. Do you want to try again? (y/n): ", end='', flush=True)
+
+    # Use select to wait for user input with a timeout
+    ready, _, _ = select.select([sys.stdin], [], [], timeout)
+
+    if ready:
+        answer = sys.stdin.readline().strip().lower()
+        if answer in ['y', 'n']:
+            return answer == 'y'  # Return True if 'y', False if 'n'
+        print("\nInvalid input. Please enter 'y' or 'n'.")
+        return prompt_retry(timeout)  # Re-prompt on invalid input
+    else:
+        print("\nNo input received in 10 seconds. Exiting.")
+        return False  # No input within the timeout, so don't retry
+
+def is_user_in_sudo_group():
+    """Check if the current user is in the 'sudo' group."""
+    try:
+        sudo_group = grp.getgrnam('sudo').gr_mem
+        return os.getlogin() in sudo_group
+    except KeyError:
+        # 'sudo' group doesn't exist (might be different on some systems)
+        return False
+    except Exception as e:
+        print(f"Error checking sudo group: {str(e)}")
+        return False
+
 def prompt_sudo():
-    if os.geteuid() != 0:
+    if os.geteuid() != 0 or not is_user_in_sudo_group():
        # No sudo required for root user
        msg = "[sudo] password for %u:"
-        return subprocess.check_call("sudo echo 'Check sudo' -p '%s'" % msg, shell=True)
-    return -1
+        while True:
+            try:
+                r = subprocess.check_output(["sudo", "-p", msg, "echo", "Check sudo"],
+                                            stderr=subprocess.STDOUT, timeout=20)
+                print(r.decode('utf-8'))  # Decode bytes to string
+                return 0
+            except subprocess.TimeoutExpired:
+                reset_terminal()  # Reset terminal to sane state
+                if not prompt_retry():  # If the user chooses not to retry or times out
+                    return -1
+            except subprocess.CalledProcessError as e:
+                print(f"Command failed: {e.output.decode('utf-8')}")
+                reset_terminal()  # Reset terminal in case of failure
+                return -1
+            except Exception as e:
+                print(f"An error occurred: {str(e)}")
+                reset_terminal()  # Always reset terminal after error
+                return -1
+    return 0
 
 def postprocess(i):
diff --git a/script/get-cuda-devices/_cm.yaml b/script/get-cuda-devices/_cm.yaml
index 64d49d95b..e0d348b83 100644
--- a/script/get-cuda-devices/_cm.yaml
+++ b/script/get-cuda-devices/_cm.yaml
@@ -55,3 +55,6 @@ variations:
    - tags: get,generic-python-lib,_package.pycuda
      names:
      - pycuda
+    - tags: get,generic-python-lib,_package.numpy
+      names:
+      - numpy
diff --git a/script/get-dlrm-data-mlperf-inference/_cm.yaml b/script/get-dlrm-data-mlperf-inference/_cm.yaml
index ab0e46e8b..f287e37db 100644
--- a/script/get-dlrm-data-mlperf-inference/_cm.yaml
+++ b/script/get-dlrm-data-mlperf-inference/_cm.yaml
@@ -17,7 +17,7 @@ new_env_keys:
 input_mapping:
  dlrm_data_path: CM_DLRM_DATA_PATH
  criteo_day23_raw_data_path: CM_CRITEO_DAY23_RAW_DATA_PATH
- prehook_deps:
+prehook_deps:
- tags: get,ml-model,dlrm,_pytorch
  enable_if_env:
    CM_DLRM_MODEL_DOWNLOAD:
diff --git a/script/get-generic-python-lib/_cm.json b/script/get-generic-python-lib/_cm.json
index fa78d0d96..b3757091c 100644
--- a/script/get-generic-python-lib/_cm.json
+++ b/script/get-generic-python-lib/_cm.json
@@ -73,6 +73,16 @@
  "tags_help": "get generic-python-lib",
  "uid": "94b62a682bc44791",
  "variations": {
+    "index-url.#": {
+      "env": {
+        "CM_GENERIC_PYTHON_PIP_INDEX_URL": "#"
+      }
+    },
+    "extra-index-url.#": {
+      "env": {
+        "CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL": "#"
+      }
+    },
    "Pillow": {
      "env": {
        "CM_GENERIC_PYTHON_PACKAGE_NAME": "Pillow"
diff --git a/script/get-generic-sys-util/_cm.json b/script/get-generic-sys-util/_cm.json
index e8ed21f1f..90fba4fc9 100644
--- a/script/get-generic-sys-util/_cm.json
+++ b/script/get-generic-sys-util/_cm.json
@@ -564,7 +564,8 @@
    },
    "rsync": {
      "env": {
-        "CM_SYS_UTIL_NAME": "rsync"
+        "CM_SYS_UTIL_NAME": "rsync",
+        "CM_SYS_UTIL_CHECK_CMD": "rsync --version"
      },
      "state": {
        "rsync": {
diff --git a/script/get-mlperf-inference-loadgen/_cm.yaml b/script/get-mlperf-inference-loadgen/_cm.yaml
index 09d66d544..a097a1edd 100644
--- a/script/get-mlperf-inference-loadgen/_cm.yaml
+++ b/script/get-mlperf-inference-loadgen/_cm.yaml
@@ -40,13 +40,18 @@ deps:
  - CM_MLPERF_INFERENCE_LOADGEN_DOWNLOAD_URL
- names:
  - compiler
-  skip_if_env:
+  skip_if_any_env:
    CM_HOST_OS_TYPE:
    - windows
+    CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP:
+    - 'yes'
  tags: get,compiler
- enable_if_env:
    CM_HOST_OS_TYPE:
    - windows
+  skip_if_env:
+    CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP:
+    - 'yes'
  names:
  - compiler
  tags: get,cl
@@ -94,6 +99,11 @@ tags:
- mlcommons
 
variations:
+  from-pip:
+    env:
+      CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP: 'yes'
+    deps:
+      - tags: get,generic-python-lib,_package.mlcommons-loadgen
  copy:
    add_deps:
      inference-src-loadgen:
diff --git a/script/get-mlperf-inference-loadgen/customize.py b/script/get-mlperf-inference-loadgen/customize.py
index 1298e7348..077a6fae2 100644
--- a/script/get-mlperf-inference-loadgen/customize.py
+++ b/script/get-mlperf-inference-loadgen/customize.py
@@ -4,9 +4,10 @@ def preprocess(i):
 
    os_info = i['os_info']
+    env = i['env']
 
-#    if os_info['platform'] == 'windows':
-#        return {'return':1, 'error': 'Windows is not supported in this script yet'}
+    if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
+        i['run_script_input']['script_name'] = "donotrun"
 
    return {'return':0}
 
@@ -15,6 +16,10 @@ def postprocess(i):
    os_info = i['os_info']
    env = i['env']
 
+    if env.get('CM_TMP_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP', '') == 'yes':
+        return {'return':0}
+
+
    for key in ['+PYTHONPATH', '+C_INCLUDE_PATH', '+CPLUS_INCLUDE_PATH', '+LD_LIBRARY_PATH', '+DYLD_FALLBACK_LIBRARY_PATH']:
        # 20221024: we save and restore env in the main script and can clean env here for determinism
        # if key not in env:
diff --git a/script/get-mlperf-inference-sut-description/_cm.json b/script/get-mlperf-inference-sut-description/_cm.json
index f9c1b0345..e5b8723c4 100644
--- a/script/get-mlperf-inference-sut-description/_cm.json
+++ b/script/get-mlperf-inference-sut-description/_cm.json
@@ -22,7 +22,13 @@
      "names": [
        "compiler"
      ],
-      "tags": "get,compiler"
+      "tags": "get,compiler",
+      "skip_if_env": {
+        "CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP":
+        [
+          "yes"
+        ]
+      }
    },
    {
      "tags": "get,cuda-devices,_with-pycuda",
diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py
index cc36483c6..faf8556b6 100644
--- a/script/get-mlperf-inference-sut-description/customize.py
+++ b/script/get-mlperf-inference-sut-description/customize.py
@@ -8,7 +8,7 @@ def preprocess(i):
    state = i['state']
    os_info = i['os_info']
 
-    submitter = env.get('CM_MLPERF_SUBMITTER', 'CTuning')
+    submitter = env.get('CM_MLPERF_SUBMITTER', 'MLCommons')
 
    auto_detected_hw_name = False
    if env.get('CM_HW_NAME', '') == '':
diff --git a/script/get-mlperf-inference-sut-description/detect_memory.sh b/script/get-mlperf-inference-sut-description/detect_memory.sh
index edc338c79..8a65daa13 100644
--- a/script/get-mlperf-inference-sut-description/detect_memory.sh
+++ b/script/get-mlperf-inference-sut-description/detect_memory.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 if [[ ${CM_SUDO_USER} == "yes" ]]; then
-  sudo dmidecode -t memory > meminfo.out
+  ${CM_SUDO} dmidecode -t memory > meminfo.out
   ${CM_PYTHON_BIN_WITH_PATH} ${CM_TMP_CURRENT_SCRIPT_PATH}/get_memory_info.py
 fi
 test $? -eq 0 || return $?
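The loadgen changes above add a `from-pip` variation and, later in this diff, a `pip_loadgen` input mapped to CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP; the ResNet50 Windows job uses them to skip the source build. A minimal usage sketch, assuming the script's existing `get,mlperf,inference,loadgen` tags; the standalone invocation below is illustrative rather than copied from the diff:

```bash
# Install MLPerf loadgen from PyPI instead of building it from source (sketch):
cm run script --tags=get,mlperf,inference,loadgen,_from-pip --quiet

# The ResNet50 Windows workflow above achieves the same inside a full run via:
#   --adr.loadgen.tags=_from-pip --pip_loadgen=yes
```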
diff --git a/script/get-preprocessed-dataset-kits19/customize.py b/script/get-preprocessed-dataset-kits19/customize.py
index 8de059375..c8a0914d2 100644
--- a/script/get-preprocessed-dataset-kits19/customize.py
+++ b/script/get-preprocessed-dataset-kits19/customize.py
@@ -17,5 +17,6 @@ def postprocess(i):
    env = i['env']
    if 'CM_DATASET_PREPROCESSED_PATH' not in env:
        env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd()
+    env['CM_DATASET_KITS19_PREPROCESSED_PATH'] = env['CM_DATASET_PREPROCESSED_PATH']
 
    return {'return': 0}
diff --git a/script/process-mlperf-accuracy/_cm.json b/script/process-mlperf-accuracy/_cm.json
index 7acbd4adf..cd4028a53 100644
--- a/script/process-mlperf-accuracy/_cm.json
+++ b/script/process-mlperf-accuracy/_cm.json
@@ -369,6 +369,15 @@
        },
        {
          "tags": "get,generic-python-lib,_package.ijson"
+        },
+        {
+          "tags": "get,generic-python-lib,_package.numpy",
+          "version_max": "1.22",
+          "version_max_usable": "1.22",
+          "names": [
+            "pip-package",
+            "numpy"
+          ]
        }
      ],
      "env": {
diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py
index 9158fde9a..84114d7af 100644
--- a/script/run-docker-container/customize.py
+++ b/script/run-docker-container/customize.py
@@ -221,10 +221,8 @@ def postprocess(i):
        lines = docker_out.split("\n")
 
        for line in lines:
-            print(f"line = {line}")
            if line.startswith("ID="):
                ID = line[3:]
-                print(f"My id = {ID}")
                env['CM_DOCKER_CONTAINER_ID'] = ID
 
    print(docker_out)
diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml
index efb637150..cefdf55d4 100644
--- a/script/run-mlperf-inference-app/_cm.yaml
+++ b/script/run-mlperf-inference-app/_cm.yaml
@@ -57,6 +57,7 @@ input_mapping:
  framework: CM_MLPERF_BACKEND
  gpu_name: CM_NVIDIA_GPU_NAME
  hw_name: CM_HW_NAME
+  pip_loadgen: CM_MLPERF_INFERENCE_LOADGEN_INSTALL_FROM_PIP
  hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA
  imagenet_path: IMAGENET_PATH
  implementation: CM_MLPERF_IMPLEMENTATION
diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py
index 51bb38cfa..5c863bfca 100644
--- a/script/run-mlperf-inference-submission-checker/customize.py
+++ b/script/run-mlperf-inference-submission-checker/customize.py
@@ -7,6 +7,8 @@ def preprocess(i):
    os_info = i['os_info']
    env = i['env']
 
+    q = '"' if os_info['platform'] == 'windows' else "'"
+
    submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "")
 
    version = env.get('CM_MLPERF_SUBMISSION_CHECKER_VERSION','')
@@ -49,11 +51,11 @@ def preprocess(i):
 
    extra_args = ' ' + env.get('CM_MLPERF_SUBMISSION_CHECKER_EXTRA_ARGS','')
 
-    x_submitter = ' --submitter "' + submitter + '" ' if submitter!='' else ''
+    x_submitter = ' --submitter ' + q + submitter + q if submitter!='' else ''
 
    x_version = ' --version ' + version +' ' if version!='' else ''
 
-    CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' \'' + submission_checker_file + '\' --input \'' + submission_dir + '\'' + \
+    CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' '+ q + submission_checker_file + q +' --input ' + q + submission_dir + q + \
        x_submitter + \
        x_version + \
        skip_compliance + extra_map + power_check + extra_args
@@ -65,7 +67,7 @@ def preprocess(i):
                                          "generate_final_report.py")
    env['CM_RUN_CMD'] = CMD
    print(CMD)
-    env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' \'' + report_generator_file + '\' --input summary.csv' + \
+    env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] +' ' + q + report_generator_file + q + ' --input summary.csv ' + \
        x_version + \
        x_submission_repository
diff --git a/script/set-user-limits/_cm.yaml b/script/set-user-limits/_cm.yaml
new file mode 100644
index 000000000..6097298c2
--- /dev/null
+++ b/script/set-user-limits/_cm.yaml
@@ -0,0 +1,14 @@
+alias: set-user-limits
+automation_alias: script
+automation_uid: 5b4e0237da074764
+cache: false
+tags:
+- set
+- user
+- limits
+- limit
+uid: 49dd1856b37342ac
+variations:
+  large-nofile:
+    env:
+      CM_ULIMIT_NOFILE: 9999
diff --git a/script/set-user-limits/customize.py b/script/set-user-limits/customize.py
new file mode 100644
index 000000000..3b67e410b
--- /dev/null
+++ b/script/set-user-limits/customize.py
@@ -0,0 +1,29 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+    os_info = i['os_info']
+
+    env = i['env']
+
+    meta = i['meta']
+
+    automation = i['automation']
+
+    quiet = (env.get('CM_QUIET', False) == 'yes')
+
+    cmds = []
+
+    if env.get('CM_ULIMIT_NOFILE', '') != '':
+        cmds.append(f"ulimit -n {env['CM_ULIMIT_NOFILE']}")
+
+    env['CM_RUN_CMD'] = " && ".join(cmds)
+
+    return {'return':0}
+
+def postprocess(i):
+
+    env = i['env']
+
+    return {'return':0}
diff --git a/script/set-user-limits/run.sh b/script/set-user-limits/run.sh
new file mode 100644
index 000000000..4c23c380e
--- /dev/null
+++ b/script/set-user-limits/run.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH}
+
+#To export any variable
+#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out
+
+#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency
+
+echo "Running: "
+echo "${CM_RUN_CMD}"
+echo ""
+
+if [[ ${CM_FAKE_RUN} != "yes" ]]; then
+  eval "${CM_RUN_CMD}"
+  test $? -eq 0 || exit 1
+fi
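The new set-user-limits script above is pulled in by the dlrm-v2 deps earlier in this diff through the `set,user,limit,_large-nofile` tags, and its only variation sets CM_ULIMIT_NOFILE to 9999. A minimal usage sketch, assuming the tags defined in `_cm.yaml`; the direct invocation is illustrative rather than taken from the diff:

```bash
# Raise the per-process open-file limit via the new CM script (sketch):
cm run script --tags=set,user,limit,_large-nofile --quiet

# run.sh then eval's the command assembled by customize.py, effectively:
ulimit -n 9999
```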