From 52814649e582425a8a5b4273d514c4030442dd0d Mon Sep 17 00:00:00 2001 From: Sakshi-1234 Date: Sat, 29 Jun 2024 11:59:47 +0530 Subject: [PATCH 1/3] refactor: replace print wih logging --- automation/cache/module.py | 20 +- automation/cache/module_misc.py | 9 +- automation/cfg/module.py | 10 +- automation/challenge/module.py | 4 +- automation/contributor/module.py | 24 +- automation/data/module.py | 4 +- automation/docker/module.py | 4 +- automation/docs/module.py | 4 +- automation/experiment/module.py | 98 ++++---- automation/project/module.py | 4 +- automation/report/module.py | 4 +- automation/script/module.py | 230 +++++++++--------- automation/script/module_help.py | 43 ++-- automation/script/module_misc.py | 25 +- automation/script/template-ae-python/main.py | 10 +- .../script/template-python/customize.py | 12 +- automation/script/template-python/main.py | 10 +- .../script/template-pytorch/customize.py | 12 +- automation/script/template-pytorch/main.py | 16 +- automation/utils/module.py | 20 +- automation/utils/module_cfg.py | 16 +- .../customize.py | 12 +- .../src/onnx_classify.py | 40 +-- .../src/pytorch_classify_preprocessed.py | 12 +- .../src/classify.py | 62 ++--- .../app-image-corner-detection/customize.py | 4 +- .../app-loadgen-generic-python/customize.py | 8 +- .../src/backend_onnxruntime.py | 4 +- .../src/backend_pytorch.py | 16 +- script/app-loadgen-generic-python/src/main.py | 12 +- .../app-loadgen-generic-python/src/utils.py | 2 +- .../app-mlperf-inference-dummy/customize.py | 6 +- .../customize.py | 7 +- .../customize.py | 3 +- .../nvidia/retinanet.py | 11 +- .../customize.py | 5 +- .../app-mlperf-inference-redhat/customize.py | 5 +- .../app-mlperf-inference/build_dockerfiles.py | 7 +- script/app-mlperf-inference/customize.py | 20 +- .../app-mlperf-training-nvidia/customize.py | 3 +- .../customize.py | 2 +- .../app-stable-diffusion-onnx-py/process.py | 10 +- .../customize.py | 4 +- script/benchmark-program/customize.py | 16 +- 
script/build-docker-image/customize.py | 24 +- script/calibrate-model-for.qaic/customize.py | 4 +- script/compile-model-for.qaic/customize.py | 8 +- script/compile-program/customize.py | 4 +- script/create-custom-cache-entry/customize.py | 4 +- .../customize.py | 4 +- .../customize.py | 4 +- script/create-patch/customize.py | 12 +- script/detect-cpu/customize.py | 5 +- script/download-file/customize.py | 14 +- script/extract-file/customize.py | 10 +- .../customize.py | 39 +-- .../customize.py | 28 +-- .../generate-mlperf-tiny-report/customize.py | 5 +- .../customize.py | 16 +- script/get-android-sdk/customize.py | 12 +- script/get-aria2/customize.py | 6 +- script/get-aws-cli/customize.py | 4 +- script/get-bazel/customize.py | 4 +- script/get-cl/customize.py | 6 +- script/get-cmake/customize.py | 4 +- script/get-conda/customize.py | 6 +- script/get-cuda-devices/customize.py | 4 +- script/get-cuda/customize.py | 10 +- script/get-cudnn/customize.py | 8 +- script/get-dataset-cnndm/customize.py | 4 +- script/get-dataset-coco/customize.py | 20 +- script/get-dataset-coco2014/customize.py | 4 +- .../filter.py | 4 +- script/get-dataset-openimages/customize.py | 8 +- script/get-docker/customize.py | 4 +- script/get-gcc/customize.py | 4 +- script/get-generic-python-lib/customize.py | 9 +- script/get-generic-sys-util/customize.py | 8 +- script/get-github-cli/customize.py | 5 +- script/get-go/customize.py | 4 +- script/get-ipol-src/customize.py | 10 +- script/get-java/customize.py | 12 +- script/get-javac/customize.py | 12 +- script/get-llvm/customize.py | 4 +- .../get-ml-model-3d-unet-kits19/customize.py | 4 +- .../customize.py | 4 +- script/get-ml-model-gptj/convert_gptj_ckpt.py | 9 +- script/get-ml-model-gptj/customize.py | 4 +- .../download_model.py | 17 +- script/get-ml-model-mobilenet/customize.py | 4 +- .../download_sparse.py | 4 +- .../nvidia_patch_retinanet_efficientnms.py | 6 +- .../node-precision-info.py | 5 +- script/get-ml-model-rnnt/customize.py | 4 +- 
.../customize.py | 4 +- .../customize.py | 7 +- .../customize.py | 14 +- script/get-onnxruntime-prebuilt/customize.py | 8 +- script/get-openssl/customize.py | 4 +- .../customize.py | 6 +- .../src/generic_preprocess.py | 6 +- .../preprocess_object_detection_dataset.py | 5 +- .../customize.py | 4 +- .../customize.py | 4 +- .../customize.py | 4 +- .../customize.py | 4 +- script/get-python3/customize.py | 4 +- script/get-qaic-apps-sdk/customize.py | 4 +- script/get-qaic-platform-sdk/customize.py | 4 +- script/get-rclone/customize.py | 8 +- script/get-rocm/customize.py | 4 +- script/get-sys-utils-cm/customize.py | 18 +- script/get-sys-utils-min/customize.py | 17 +- script/get-tensorrt/customize.py | 4 +- script/get-terraform/customize.py | 4 +- script/get-tvm-model/customize.py | 4 +- script/get-tvm-model/process.py | 13 +- script/gui/app.py | 4 +- script/gui/customize.py | 8 +- script/gui/script.py | 10 +- script/gui/tests/generate_password.py | 5 +- .../customize.py | 30 +-- .../customize.py | 46 ++-- .../customize.py | 16 +- script/install-bazel/customize.py | 4 +- script/install-cmake-prebuilt/customize.py | 14 +- script/install-gcc-src/customize.py | 4 +- .../customize.py | 3 +- script/install-gflags/customize.py | 4 +- script/install-llvm-prebuilt/customize.py | 22 +- script/install-openssl/customize.py | 4 +- script/install-python-src/customize.py | 4 +- script/install-python-venv/customize.py | 4 +- script/launch-benchmark/customize.py | 4 +- script/launch-benchmark/tests/debug.py | 4 +- .../customize.py | 4 +- script/print-any-text/customize.py | 6 +- script/print-croissant-desc/code.py | 16 +- script/print-hello-world-py/code.py | 5 +- script/process-ae-users/code.py | 16 +- script/process-mlperf-accuracy/customize.py | 14 +- script/prune-bert-models/customize.py | 14 +- script/publish-results-to-dashboard/code.py | 16 +- script/push-csv-to-spreadsheet/google_api.py | 4 +- .../customize.py | 8 +- .../main.py | 10 +- script/run-all-mlperf-models/customize.py | 
6 +- script/run-docker-container/customize.py | 40 +-- script/run-mlperf-inference-app/customize.py | 28 +-- .../run-mlperf-inference-app/run_mobilenet.py | 6 +- .../customize.py | 6 +- .../code.py | 16 +- script/run-mlperf-power-server/customize.py | 4 +- script/run-terraform/customize.py | 6 +- script/set-venv/customize.py | 16 +- script/tar-my-folder/customize.py | 6 +- script/test-cm-core/src/test_cm.py | 3 +- script/test-cm-core/src/test_search_speed.py | 6 +- script/test-cm-script-pipeline/customize.py | 20 +- script/test-debug/_demo.py | 6 +- script/test-debug/customize.py | 20 +- script/test-debug/python/main.py | 12 +- .../customize.py | 4 +- setup.py | 5 +- tests/test_cm.py | 3 +- tests/test_search_speed.py | 6 +- 166 files changed, 985 insertions(+), 959 deletions(-) diff --git a/automation/cache/module.py b/automation/cache/module.py index 0f0f2be75f..395381da92 100644 --- a/automation/cache/module.py +++ b/automation/cache/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,7 +47,7 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info(json.dumps(i, indent=2)) return {'return':0} @@ -121,12 +121,12 @@ def show(self, i): version = meta.get('version','') if console: - print ('') + logging.info ('') # print ('* UID: {}'.format(uid)) - print ('* Tags: {}'.format(','.join(tags))) - print (' Path: {}'.format(path)) + logging.info ('* Tags: {}'.format(','.join(tags))) + logging.info (' Path: {}'.format(path)) if version!='': - print (' Version: {}'.format(version)) + logging.info (' Version: {}'.format(version)) if show_env and console: path_to_cached_state_file = os.path.join(path, 'cm-cached-state.json') @@ -140,13 +140,13 @@ def show(self, i): new_env = cached_state.get('new_env', {}) if len(new_env)>0: - print (' New env:') - print (json.dumps(new_env, indent=6, sort_keys=True).replace('{','').replace('}','')) + logging.info (' New env:') + 
logging.info (json.dumps(new_env, indent=6, sort_keys=True).replace('{','').replace('}','')) new_state = cached_state.get('new_state', {}) if len(new_state)>0: - print (' New state:') - print (json.dumps(new_env, indent=6, sort_keys=True)) + logging.info (' New state:') + logging.info (json.dumps(new_env, indent=6, sort_keys=True)) return {'return':0, 'list': lst} diff --git a/automation/cache/module_misc.py b/automation/cache/module_misc.py index cc4a6ac31b..2060bf8e0e 100644 --- a/automation/cache/module_misc.py +++ b/automation/cache/module_misc.py @@ -1,4 +1,5 @@ import os +import logging from cmind import utils @@ -54,9 +55,9 @@ def copy_to_remote(i): if len(r['list']) == 0: pass #fixme elif len(r['list']) > 1: - print("Multiple cache entries found: ") + logging.warning("Multiple cache entries found: ") for k in sorted(r['list'], key = lambda x: x.meta.get('alias','')): - print(k.path) + logging.info(k.path) x = input("Would you like to copy them all? Y/n: ") if x.lower() == 'n': return {'return': 0} @@ -68,7 +69,7 @@ def copy_to_remote(i): cacheid = os.path.basename(path) copy_cmd = f"rsync -avz --exclude cm-cached-state.json -e 'ssh -p {remote_port}' {path} {remote_user}@{remote_host}:{remote_cm_cache_location}" - print(copy_cmd) + logging.info(copy_cmd) os.system(copy_cmd) cm_cached_state_json_file = os.path.join(path, "cm-cached-state.json") @@ -92,7 +93,7 @@ def copy_to_remote(i): remote_cached_state_file_location = os.path.join(remote_cm_cache_location, cacheid, "cm-cached-state.json") copy_cmd = f"rsync -avz -e 'ssh -p {remote_port}' tmp_remote_cached_state.json {remote_user}@{remote_host}:{remote_cached_state_file_location}" - print(copy_cmd) + logging.info(copy_cmd) os.system(copy_cmd) return {'return':0} diff --git a/automation/cfg/module.py b/automation/cfg/module.py index f970c7bb26..ec21fc2832 100644 --- a/automation/cfg/module.py +++ b/automation/cfg/module.py @@ -3,7 +3,7 @@ # Written by Grigori Fursin import os - +import logging from 
cmind.automation import Automation from cmind import utils @@ -51,7 +51,7 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} @@ -124,9 +124,9 @@ def check_to_delete(d): if r['return']>0: return r # Print config - print ('Config:') - print ('') - print (json.dumps(config, indent=2)) + logging.info ('Config:') + logging.info ('') + logging.info (json.dumps(config, indent=2)) return {'return':0} diff --git a/automation/challenge/module.py b/automation/challenge/module.py index be8d6e7b1d..8abb368fb8 100644 --- a/automation/challenge/module.py +++ b/automation/challenge/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/contributor/module.py b/automation/contributor/module.py index 82807638f8..ccefeed4c6 100644 --- a/automation/contributor/module.py +++ b/automation/contributor/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,7 +47,7 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} @@ -131,7 +131,7 @@ def add(self, i): i['meta'] = meta - print ('') + logging.info ('') r = self.cmind.access(i) if r['return']>0: return r @@ -140,14 +140,14 @@ def add(self, i): path2 = os.path.dirname(path) - print ('') - print ('Please go to {}, add your directory to Git, commit and create PR:'.format(path2)) - print ('') - print ('cd {}'.format(path2)) - print ('git add "{}"'.format(name)) - print ('git commit "{}"'.format(name)) - print ('') - print ('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!') - print ('Looking forward to your contributions!') + logging.info ('') + logging.info ('Please go 
to {}, add your directory to Git, commit and create PR:'.format(path2)) + logging.info ('') + logging.info ('cd {}'.format(path2)) + logging.info ('git add "{}"'.format(name)) + logging.info ('git commit "{}"'.format(name)) + logging.info ('') + logging.info ('Please join https://discord.gg/JjWNWXKxwT to discuss challenges!') + logging.info ('Looking forward to your contributions!') return r diff --git a/automation/data/module.py b/automation/data/module.py index be8d6e7b1d..8abb368fb8 100644 --- a/automation/data/module.py +++ b/automation/data/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/docker/module.py b/automation/docker/module.py index aaf0f7802c..e4c97f14ec 100644 --- a/automation/docker/module.py +++ b/automation/docker/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -46,6 +46,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/docs/module.py b/automation/docs/module.py index be8d6e7b1d..8abb368fb8 100644 --- a/automation/docs/module.py +++ b/automation/docs/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/experiment/module.py b/automation/experiment/module.py index 57fa6f6458..20bcf3e5b8 100644 --- a/automation/experiment/module.py +++ b/automation/experiment/module.py @@ -7,7 +7,7 @@ import itertools import copy import json - +import logging from cmind.automation import Automation from cmind import 
utils @@ -58,7 +58,7 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} @@ -119,8 +119,8 @@ def run(self, i): experiment_path = experiment.path if console: - print ('') - print ('Path to CM experiment artifact: {}'.format(experiment_path)) + logging.info ('') + logging.info ('Path to CM experiment artifact: {}'.format(experiment_path)) # Get directory with datetime @@ -136,21 +136,21 @@ def run(self, i): if len(datetimes)==1: datetime = datetimes[0] elif len(datetimes)>1: - print ('') - print ('Select experiment:') + logging.info ('') + logging.info ('Select experiment:') datetimes = sorted(datetimes) num = 0 - print ('') + logging.info ('') for d in datetimes: - print ('{}) {}'.format(num, d.replace('.',' '))) + logging.info ('{}) {}'.format(num, d.replace('.',' '))) num += 1 if not console: return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm rerun experiment --dir={date and time}"'} - print ('') + logging.info ('') x=input('Make your selection or press Enter for 0: ') x=x.strip() @@ -192,7 +192,7 @@ def run(self, i): os.makedirs(experiment_path2) # Change current path - print ('Path to experiment: {}'.format(experiment_path2)) + logging.warning ('Path to experiment: {}'.format(experiment_path2)) os.chdir(experiment_path2) @@ -284,10 +284,10 @@ def run(self, i): step += 1 - print ('================================================================') - print ('Experiment step: {} out of {}'.format(step, num_steps)) + logging.info ('================================================================') + logging.info ('Experiment step: {} out of {}'.format(step, num_steps)) - print ('') + logging.info ('') ii = copy.deepcopy(ii_copy) @@ -295,17 +295,17 @@ def run(self, i): l_dimensions=len(dimensions) if l_dimensions>0: - print (' Updating ENV variables during exploration:') + logging.info (' Updating ENV variables during exploration:') - print ('') + logging.info 
('') for j in range(l_dimensions): v = dimensions[j] k = explore_keys[j] - print (' - Dimension {}: "{}" = {}'.format(j, k, v)) + logging.info (' - Dimension {}: "{}" = {}'.format(j, k, v)) env[k] = str(v) - print ('') + logging.info ('') # Generate UID and prepare extra directory: r = utils.gen_uid() @@ -324,8 +324,8 @@ def run(self, i): current_datetime = r['iso_datetime'] # Change current path - print ('Path to experiment step: {}'.format(experiment_path3)) - print ('') + logging.info ('Path to experiment step: {}'.format(experiment_path3)) + logging.info ('') os.chdir(experiment_path3) # Prepare and run experiment in a given placeholder directory @@ -367,10 +367,10 @@ def run(self, i): ii['command'] = cmd_step - print ('Generated CMD:') - print ('') - print (cmd_step) - print ('') + logging.info ('Generated CMD:') + logging.info ('') + logging.info (cmd_step) + logging.info ('') # Prepare experiment step input experiment_step_input_file = os.path.join(experiment_path3, self.CM_INPUT_FILE) @@ -494,8 +494,8 @@ def replay(self, i): experiment_path = experiment.path if console: - print ('') - print ('Path to CM experiment artifact: {}'.format(experiment_path)) + logging.info ('') + logging.info ('Path to CM experiment artifact: {}'.format(experiment_path)) # Check date and time folder uid = i.get('uid', '') @@ -532,21 +532,21 @@ def replay(self, i): if len(datetimes)==1: datetime = datetimes[0] else: - print ('') - print ('Available experiments:') + logging.info ('') + logging.info ('Available experiments:') datetimes = sorted(datetimes) num = 0 - print ('') + logging.info ('') for d in datetimes: - print ('{}) {}'.format(num, d.replace('.',' '))) + logging.info ('{}) {}'.format(num, d.replace('.',' '))) num += 1 if not console: return {'return':1, 'error':'more than 1 experiment found.\nPlease use "cm run experiment --dir={date and time}"'} - print ('') + logging.info ('') x=input('Make your selection or press Enter for 0: ') x=x.strip() @@ -577,21 +577,21 @@ def 
replay(self, i): selection = 0 else: - print ('') - print ('Available Unique IDs of results:') + logging.info ('') + logging.info ('Available Unique IDs of results:') results = sorted(results, key=lambda x: x.get('uid','')) num = 0 - print ('') + logging.info ('') for r in results: - print ('{}) {}'.format(num, r.get('uid',''))) + logging.info ('{}) {}'.format(num, r.get('uid',''))) num += 1 if not console: return {'return':1, 'error':'more than 1 result found.\nPlease use "cm run experiment --uid={result UID}"'} - print ('') + logging.info ('') x=input('Make your selection or press Enter for 0: ') x=x.strip() @@ -607,11 +607,11 @@ def replay(self, i): # Final info if console: - print ('') - print ('Path to experiment: {}'.format(experiment_path2)) + logging.info ('') + logging.info ('Path to experiment: {}'.format(experiment_path2)) - print ('') - print ('Result UID: {}'.format(uid)) + logging.info ('') + logging.info ('Result UID: {}'.format(uid)) # Attempt to load cm-input.json experiment_input_file = os.path.join(experiment_path2, self.CM_INPUT_FILE) @@ -631,11 +631,11 @@ def replay(self, i): cm_input['tags'] = tags if console: - print ('') - print ('Experiment input:') - print ('') - print (json.dumps(cm_input, indent=2)) - print ('') + logging.info ('') + logging.info ('Experiment input:') + logging.info ('') + logging.info (json.dumps(cm_input, indent=2)) + logging.info ('') # Run experiment again r = self.cmind.access(cm_input) @@ -689,21 +689,21 @@ def _find_or_add_artifact(self, i): lst = r['list'] if len(lst)>1: - print ('More than 1 experiment artifact found:') + logging.info ('More than 1 experiment artifact found:') lst = sorted(lst, key=lambda x: x.path) num = 0 - print ('') + logging.info ('') for e in lst: - print ('{}) {}'.format(num, e.path)) - print (' Tags: {}'.format(','.join(e.meta.get('tags',[])))) + logging.info ('{}) {}'.format(num, e.path)) + logging.info (' Tags: {}'.format(','.join(e.meta.get('tags',[])))) num += 1 if not console: 
return {'return':1, 'error':'more than 1 experiment artifact found.\nPlease use "cm run experiment {name}" or "cm run experiment --tags={tags separated by comma}"'} - print ('') + logging.info ('') x=input('Make your selection or press Enter for 0: ') x=x.strip() diff --git a/automation/project/module.py b/automation/project/module.py index be8d6e7b1d..8abb368fb8 100644 --- a/automation/project/module.py +++ b/automation/project/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/report/module.py b/automation/report/module.py index be8d6e7b1d..8abb368fb8 100644 --- a/automation/report/module.py +++ b/automation/report/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,6 +47,6 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} diff --git a/automation/script/module.py b/automation/script/module.py index b98a3906a2..0860ee70d7 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -10,7 +10,7 @@ # import os - +import logging from cmind.automation import Automation from cmind import utils from cmind import __version__ as current_cm_version @@ -562,8 +562,8 @@ def _run(self, i): # print ('') if not run_state.get('tmp_silent', False): - print ('') - print (recursion_spaces + '* ' + cm_script_info) + logging.info ('') + logging.info (recursion_spaces + '* ' + cm_script_info) ############################################################################# @@ -602,7 +602,7 @@ def _run(self, i): list_of_found_scripts = sorted(list_of_found_scripts, key = lambda a: (a.meta.get('sort',0), a.path)) if verbose: - print (recursion_spaces + ' - Number of scripts found: 
{}'.format(len(list_of_found_scripts))) + logging.info (recursion_spaces + ' - Number of scripts found: {}'.format(len(list_of_found_scripts))) # Check if script selection is remembered if not skip_remembered_selections and len(list_of_found_scripts) > 1: @@ -611,7 +611,7 @@ def _run(self, i): # Leave 1 entry in the found list list_of_found_scripts = [selection['cached_script']] if verbose: - print (recursion_spaces + ' - Found remembered selection with tags: {}'.format(script_tags_string)) + logging.info (recursion_spaces + ' - Found remembered selection with tags: {}'.format(script_tags_string)) break @@ -664,7 +664,7 @@ def _run(self, i): cache_tags_without_tmp_string = cache_tags_without_tmp_string.replace(",_-", ",-_") if verbose: - print (recursion_spaces + ' - Searching for cached script outputs with the following tags: {}'.format(cache_tags_without_tmp_string)) + logging.info (recursion_spaces + ' - Searching for cached script outputs with the following tags: {}'.format(cache_tags_without_tmp_string)) search_cache = {'action':'find', 'automation':self.meta['deps']['cache'], @@ -675,7 +675,7 @@ def _run(self, i): cache_list = rc['list'] if verbose: - print (recursion_spaces + ' - Number of cached script outputs found: {}'.format(len(cache_list))) + logging.info (recursion_spaces + ' - Number of cached script outputs found: {}'.format(len(cache_list))) # STEP 400 output: cache_list @@ -802,7 +802,7 @@ def _run(self, i): debug_script_tags=','.join(found_script_tags) if verbose: - print (recursion_spaces+' - Found script::{} in {}'.format(found_script_artifact, path)) + logging.info (recursion_spaces+' - Found script::{} in {}'.format(found_script_artifact, path)) # STEP 500 output: script_artifact - unique selected script artifact @@ -940,7 +940,7 @@ def _run(self, i): if len(notes)>0: if verbose: - print (recursion_spaces+' - Requested version: ' + ' '.join(notes)) + logging.info (recursion_spaces+' - Requested version: ' + ' '.join(notes)) # STEP 900 output: 
version* set # env['CM_VERSION*] set @@ -973,10 +973,10 @@ def _run(self, i): if str(env.get('CM_RUN_STATE_DOCKER', False)).lower() in ['true', '1', 'yes']: if state.get('docker'): if str(state['docker'].get('run', True)).lower() in ['false', '0', 'no']: - print (recursion_spaces+' - Skipping script::{} run as we are inside docker'.format(found_script_artifact)) + logging.info (recursion_spaces+' - Skipping script::{} run as we are inside docker'.format(found_script_artifact)) return {'return': 0} elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: - print (recursion_spaces+' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) + logging.info (recursion_spaces+' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) fake_run = True env['CM_TMP_FAKE_RUN']='yes' @@ -1098,7 +1098,7 @@ def _run(self, i): elif num_found_cached_scripts == 1: if verbose: - print (recursion_spaces+' - Found cached script output: {}'.format(found_cached_scripts[0].path)) + logging.info (recursion_spaces+' - Found cached script output: {}'.format(found_cached_scripts[0].path)) if num_found_cached_scripts > 0: @@ -1107,7 +1107,7 @@ def _run(self, i): # Check chain of dynamic dependencies on other CM scripts if len(deps)>0: if verbose: - print (recursion_spaces + ' - Checking dynamic dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking dynamic dependencies on other CM scripts:') r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1115,7 +1115,7 @@ def _run(self, i): if r['return']>0: return r if verbose: - print (recursion_spaces + ' - Processing env after dependencies ...') + logging.info (recursion_spaces + ' - Processing env after dependencies ...') r = update_env_with_values(env) if r['return']>0: return r @@ -1123,7 +1123,7 @@ def _run(self, 
i): # Check chain of prehook dependencies on other CM scripts. (No execution of customize.py for cached scripts) if verbose: - print (recursion_spaces + ' - Checking prehook dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking prehook dependencies on other CM scripts:') r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1134,7 +1134,7 @@ def _run(self, i): cached_script = found_cached_scripts[selection] if verbose: - print (recursion_spaces+' - Loading state from cached entry ...') + logging.info (recursion_spaces+' - Loading state from cached entry ...') path_to_cached_state_file = os.path.join(cached_script.path, self.file_with_cached_state) @@ -1144,7 +1144,7 @@ def _run(self, i): version = r['meta'].get('version') if not run_state.get('tmp_silent', False): - print (recursion_spaces + ' ! load {}'.format(path_to_cached_state_file)) + logging.info (recursion_spaces + ' ! load {}'.format(path_to_cached_state_file)) ################################################################################################ @@ -1173,7 +1173,7 @@ def _run(self, i): # Check chain of posthook dependencies on other CM scripts. 
We consider them same as postdeps when # script is in cache if verbose: - print (recursion_spaces + ' - Checking posthook dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking posthook dependencies on other CM scripts:') clean_env_keys_post_deps = meta.get('clean_env_keys_post_deps',[]) @@ -1183,7 +1183,7 @@ def _run(self, i): if r['return']>0: return r if verbose: - print (recursion_spaces + ' - Checking post dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking post dependencies on other CM scripts:') # Check chain of post dependencies on other CM scripts r = self._call_run_deps(post_deps, self.local_env_keys, clean_env_keys_post_deps, env, state, const, const_state, add_deps_recursive, @@ -1227,8 +1227,8 @@ def _run(self, i): # Use update to update the tmp one if already exists if verbose: - print (recursion_spaces+' - Creating new "cache" script artifact in the CM local repository ...') - print (recursion_spaces+' - Tags: {}'.format(','.join(tmp_tags))) + logging.info (recursion_spaces+' - Creating new "cache" script artifact in the CM local repository ...') + logging.info (recursion_spaces+' - Tags: {}'.format(','.join(tmp_tags))) if version != '': cached_meta['version'] = version @@ -1255,7 +1255,7 @@ def _run(self, i): # Changing path to CM script artifact for cached output # to record data and files there if verbose: - print (recursion_spaces+' - Changing to {}'.format(cached_path)) + logging.info (recursion_spaces+' - Changing to {}'.format(cached_path)) os.chdir(cached_path) @@ -1271,7 +1271,7 @@ def _run(self, i): # Changing path to CM script artifact for cached output # to record data and files there if verbose: - print (recursion_spaces+' - Changing to {}'.format(cached_path)) + logging.info (recursion_spaces+' - Changing to {}'.format(cached_path)) os.chdir(cached_path) @@ -1296,12 +1296,12 @@ def _run(self, i): ################################ if not found_cached: if len(warnings)>0: - print 
('=================================================') - print ('WARNINGS:') - print ('') + logging.info ('=================================================') + logging.info ('WARNINGS:') + logging.info ('') for w in warnings: - print (' '+w) - print ('=================================================') + logging.info (' '+w) + logging.info ('=================================================') # Update default version meta if version is not set if version == '': @@ -1333,7 +1333,7 @@ def _run(self, i): version = version_max if verbose: - print (recursion_spaces+' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) + logging.info (recursion_spaces+' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) env['CM_VERSION'] = version @@ -1359,7 +1359,7 @@ def _run(self, i): if len(docker_deps)>0: if verbose: - print (recursion_spaces + ' - Checking docker run dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking docker run dependencies on other CM scripts:') r = self._call_run_deps(docker_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1367,7 +1367,7 @@ def _run(self, i): if r['return']>0: return r if verbose: - print (recursion_spaces + ' - Processing env after docker run dependencies ...') + logging.info (recursion_spaces + ' - Processing env after docker run dependencies ...') r = update_env_with_values(env) if r['return']>0: return r @@ -1375,7 +1375,7 @@ def _run(self, i): # Check chain of dependencies on other CM scripts if len(deps)>0: if verbose: - print (recursion_spaces + ' - Checking dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - Checking dependencies on other CM scripts:') r = self._call_run_deps(deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, 
add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1383,7 +1383,7 @@ def _run(self, i): if r['return']>0: return r if verbose: - print (recursion_spaces + ' - Processing env after dependencies ...') + logging.info (recursion_spaces + ' - Processing env after dependencies ...') r = update_env_with_values(env) if r['return']>0: return r @@ -1467,13 +1467,13 @@ def _run(self, i): env['CM_TMP_PIP_VERSION_STRING'] = pip_version_string if pip_version_string != '': if verbose: - print (recursion_spaces+' # potential PIP version string (if needed): '+pip_version_string) + logging.info (recursion_spaces+' # potential PIP version string (if needed): '+pip_version_string) # Check if pre-process and detect if 'preprocess' in dir(customize_code) and not fake_run: if verbose: - print (recursion_spaces+' - Running preprocess ...') + logging.info (recursion_spaces+' - Running preprocess ...') # Update env and state with const utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) @@ -1495,7 +1495,7 @@ def _run(self, i): if skip: if verbose: - print (recursion_spaces+' - this script is skipped!') + logging.info (recursion_spaces+' - this script is skipped!') # Check if script asks to run other dependencies instead of the skipped one another_script = r.get('script', {}) @@ -1504,7 +1504,7 @@ def _run(self, i): return {'return':0, 'skipped': True} if verbose: - print (recursion_spaces+' - another script is executed instead!') + logging.info (recursion_spaces+' - another script is executed instead!') ii = { 'action':'run', @@ -1543,12 +1543,12 @@ def _run(self, i): if print_env: import json if verbose: - print (json.dumps(env, indent=2, sort_keys=True)) + logging.info (json.dumps(env, indent=2, sort_keys=True)) # Check chain of pre hook dependencies on other CM scripts if len(prehook_deps)>0: if verbose: - print (recursion_spaces + ' - Checking prehook dependencies on other CM scripts:') + logging.info (recursion_spaces + ' - 
Checking prehook dependencies on other CM scripts:') r = self._call_run_deps(prehook_deps, self.local_env_keys, local_env_keys_from_meta, env, state, const, const_state, add_deps_recursive, recursion_spaces + extra_recursion_spaces, @@ -1675,7 +1675,7 @@ def _run(self, i): # Remove tmp tag from the "cached" arifact to finalize caching if verbose: - print (recursion_spaces+' - Removing tmp tag in the script cached output {} ...'.format(cached_uid)) + logging.info (recursion_spaces+' - Removing tmp tag in the script cached output {} ...'.format(cached_uid)) # Check if version was detected and record in meta) if detected_version != '': @@ -1796,7 +1796,7 @@ def _run(self, i): elapsed_time = time.time() - start_time if verbose and cached_uid!='': - print (recursion_spaces+' - cache UID: {}'.format(cached_uid)) + logging.info (recursion_spaces+' - cache UID: {}'.format(cached_uid)) if print_deps: print_deps_data = self._print_deps(run_state['deps']) @@ -1837,8 +1837,8 @@ def _run(self, i): if i.get('json', False) or i.get('j', False): import json - print ('') - print (json.dumps(rr, indent=2)) + logging.info ('') + logging.info (json.dumps(rr, indent=2)) @@ -1851,7 +1851,7 @@ def _run(self, i): dump_repro(repro_prefix, rr, run_state) if verbose or show_time: - print (recursion_spaces+' - running time of script "{}": {:.2f} sec.'.format(','.join(found_script_tags), elapsed_time)) + logging.info (recursion_spaces+' - running time of script "{}": {:.2f} sec.'.format(','.join(found_script_tags), elapsed_time)) if not recursion and show_space: @@ -1860,14 +1860,14 @@ def _run(self, i): used_disk_space_in_mb = int((start_disk_stats.free - stop_disk_stats.free) / (1024*1024)) if used_disk_space_in_mb > 0: - print (recursion_spaces+' - used disk space: {} MB'.format(used_disk_space_in_mb)) + logging.info (recursion_spaces+' - used disk space: {} MB'.format(used_disk_space_in_mb)) # Check if need to print some final info such as path to model, etc if not 
run_state.get('tmp_silent', False): print_env_at_the_end = meta.get('print_env_at_the_end',{}) if len(print_env_at_the_end)>0: - print ('') + logging.info ('') for p in sorted(print_env_at_the_end): t = print_env_at_the_end[p] @@ -1875,9 +1875,9 @@ def _run(self, i): v = new_env.get(p, None) - print ('{}: {}'.format(t, str(v))) + logging.info ('{}: {}'.format(t, str(v))) - print ('') + logging.info ('') # Check if print nice versions if print_versions: @@ -1885,7 +1885,7 @@ def _run(self, i): # Check if pause (useful if running a given script in a new terminal that may close automatically) if i.get('pause', False): - print ('') + logging.info ('') input ('Press Enter to continue ...') return rr @@ -1931,11 +1931,11 @@ def _fix_cache_paths(self, env): def _dump_version_info_for_script(self, output_dir = os.getcwd(), quiet = False, silent = False): if not quiet and not silent: - print ('') + logging.info ('') for f in ['cm-run-script-versions.json', 'version_info.json']: if not quiet and not silent: - print ('Dumping versions to {}'.format(f)) + logging.info ('Dumping versions to {}'.format(f)) r = utils.save_json(f, self.run_state.get('version_info', [])) if r['return']>0: return r @@ -2019,7 +2019,7 @@ def _update_state_from_variations(self, i, meta, variation_tags, variations, env variation_tags_string += x if verbose: - print (recursion_spaces+' Prepared variations: {}'.format(variation_tags_string)) + logging.info (recursion_spaces+' Prepared variations: {}'.format(variation_tags_string)) # Update env and other keys if variations if len(variation_tags)>0: @@ -2261,7 +2261,7 @@ def version(self, i): version = self.__version__ if console: - print (version) + logging.info (version) return {'return':0, 'version':version} @@ -2376,7 +2376,7 @@ def search(self, i): # Print filtered paths if console if console: for script in r['list']: - print (script.path) + logging.info (script.path) # Finalize output r['script_tags'] = script_tags @@ -2443,9 +2443,9 @@ def 
test(self, i): uid = meta.get('uid','') if console: - print ('') - print (path) - print (' Test: TBD') + logging.info ('') + logging.info (path) + logging.info (' Test: TBD') return {'return':0, 'list': lst} @@ -2687,7 +2687,7 @@ def add(self, i): new_script_path = r_obj['path'] if console: - print ('Created script in {}'.format(new_script_path)) + logging.info ('Created script in {}'.format(new_script_path)) # Copy files from template (only if exist) files = [ @@ -2733,7 +2733,7 @@ def add(self, i): f2 = os.path.join(new_script_path, f2) if console: - print (' * Copying {} to {}'.format(f1, f2)) + logging.info (' * Copying {} to {}'.format(f1, f2)) shutil.copyfile(f1,f2) @@ -3249,9 +3249,9 @@ def _print_versions(self, run_state): version_info = run_state.get('version_info', []) - print ('=========================') - print ('Versions of dependencies:') - print ('') + logging.info ('=========================') + logging.info ('Versions of dependencies:') + logging.info ('') for v in version_info: k = list(v.keys())[0] @@ -3260,9 +3260,9 @@ def _print_versions(self, run_state): version = version_info_dict.get('version','') if version !='' : - print ('* {}: {}'.format(k, version)) + logging.info ('* {}: {}'.format(k, version)) - print ('=========================') + logging.info ('=========================') return {} @@ -3284,10 +3284,10 @@ def _print_deps(self, deps): print_deps_data = [] run_cmds = self._get_deps_run_cmds(deps) - print ('') + logging.info ('') for cmd in run_cmds: print_deps_data.append(cmd) - print(cmd) + logging.info(cmd) return print_deps_data @@ -3496,15 +3496,15 @@ def find_file_in_paths(self, i): if version_max != '': x += ' <= {}'.format(version_max) if x!='': - print (recursion_spaces + ' - Searching for versions: {}'.format(x)) + logging.info (recursion_spaces + ' - Searching for versions: {}'.format(x)) new_recursion_spaces = recursion_spaces + ' ' for path_to_file in found_files: - print ('') - print (recursion_spaces + ' * ' + 
path_to_file) + logging.info ('') + logging.info (recursion_spaces + ' * ' + path_to_file) run_script_input['env'] = env run_script_input['env'][env_path_key] = path_to_file @@ -3523,7 +3523,7 @@ def find_file_in_paths(self, i): if detected_version != '': if detected_version == -1: - print (recursion_spaces + ' SKIPPED due to incompatibility ...') + logging.info (recursion_spaces + ' SKIPPED due to incompatibility ...') else: ry = check_version_constraints({'detected_version': detected_version, 'version': version, @@ -3535,7 +3535,7 @@ def find_file_in_paths(self, i): if not ry['skip']: found_files_with_good_version.append(path_to_file) else: - print (recursion_spaces + ' SKIPPED due to version constraints ...') + logging.info (recursion_spaces + ' SKIPPED due to version constraints ...') found_files = found_files_with_good_version @@ -3545,16 +3545,16 @@ def find_file_in_paths(self, i): selection = 0 else: # Select 1 and proceed - print (recursion_spaces+' - More than 1 path found:') + logging.info (recursion_spaces+' - More than 1 path found:') - print ('') + logging.info ('') num = 0 for file in found_files: - print (recursion_spaces+' {}) {}'.format(num, file)) + logging.info (recursion_spaces+' {}) {}'.format(num, file)) num += 1 - print ('') + logging.info ('') x=input(recursion_spaces+' Make your selection or press Enter for 0: ') x=x.strip() @@ -3565,8 +3565,8 @@ def find_file_in_paths(self, i): if selection < 0 or selection >= num: selection = 0 - print ('') - print (recursion_spaces+' Selected {}: {}'.format(selection, found_files[selection])) + logging.info ('') + logging.info (recursion_spaces+' Selected {}: {}'.format(selection, found_files[selection])) found_files = [found_files[selection]] @@ -3616,7 +3616,7 @@ def detect_version_using_script(self, i): if version_max != '': x += ' <= {}'.format(version_max) if x!='': - print (recursion_spaces + ' - Searching for versions: {}'.format(x)) + logging.info (recursion_spaces + ' - Searching for versions: 
{}'.format(x)) new_recursion_spaces = recursion_spaces + ' ' @@ -3732,7 +3732,7 @@ def find_artifact(self, i): if path == '': path_list_tmp = default_path_list else: - print (recursion_spaces + ' # Requested paths: {}'.format(path)) + logging.info (recursion_spaces + ' # Requested paths: {}'.format(path)) path_list_tmp = path.split(os_info['env_separator']) # Check soft links @@ -3792,8 +3792,8 @@ def find_artifact(self, i): if extra_paths[extra_path] not in env: env[extra_paths[extra_path]] = [] env[extra_paths[extra_path]].append(epath) - print () - print (recursion_spaces + ' # Found artifact in {}'.format(file_path)) + logging.info ('') + logging.info (recursion_spaces + ' # Found artifact in {}'.format(file_path)) if env_path_key != '': env[env_path_key] = file_path @@ -4208,7 +4208,7 @@ def find_cached_script(i): found_cached_scripts = [] if verbose: - print (recursion_spaces + ' - Checking if script execution is already cached ...') + logging.info (recursion_spaces + ' - Checking if script execution is already cached ...') # Create a search query to find that we already ran this script with the same or similar input # It will be gradually enhanced with more "knowledge" ... 
@@ -4240,7 +4240,7 @@ def find_cached_script(i): explicit_cached_tags.append(x) if verbose: - print (recursion_spaces+' - Prepared explicit variations: {}'.format(explicit_variation_tags_string)) + logging.info (recursion_spaces+' - Prepared explicit variations: {}'.format(explicit_variation_tags_string)) if len(variation_tags)>0: variation_tags_string = '' @@ -4258,7 +4258,7 @@ def find_cached_script(i): cached_tags.append(x) if verbose: - print (recursion_spaces+' - Prepared variations: {}'.format(variation_tags_string)) + logging.info (recursion_spaces+' - Prepared variations: {}'.format(variation_tags_string)) # Add version if version !='': @@ -4294,7 +4294,7 @@ def find_cached_script(i): search_tags += ',' + ','.join(explicit_cached_tags) if verbose: - print (recursion_spaces+' - Searching for cached script outputs with the following tags: {}'.format(search_tags)) + logging.info (recursion_spaces+' - Searching for cached script outputs with the following tags: {}'.format(search_tags)) r = self_obj.cmind.access({'action':'find', 'automation':self_obj.meta['deps']['cache'], @@ -4317,7 +4317,7 @@ def find_cached_script(i): else: found_cached_scripts = [selection['cached_script']] if verbose: - print (recursion_spaces + ' - Found remembered selection with tags "{}"!'.format(search_tags)) + logging.info (recursion_spaces + ' - Found remembered selection with tags "{}"!'.format(search_tags)) break @@ -4356,7 +4356,7 @@ def enable_or_skip_script(meta, env): (AND function) """ if type(meta) != dict: - print( "The meta entry is not a dictionary for skip/enable if_env {}".format(meta)) + logging.error( "The meta entry is not a dictionary for skip/enable if_env {}".format(meta)) for key in meta: meta_key = [str(v).lower() for v in meta[key]] @@ -4594,13 +4594,13 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): run_script_without_cm = tmp_file_run + '-without-cm' + bat_ext if verbose: - print ('') - print (recursion_spaces + ' - Running 
native script "{}" from temporal script "{}" in "{}" ...'.format(path_to_run_script, run_script, cur_dir)) - print ('') + logging.info ('') + logging.info (recursion_spaces + ' - Running native script "{}" from temporal script "{}" in "{}" ...'.format(path_to_run_script, run_script, cur_dir)) + logging.info ('') if not run_state.get('tmp_silent', False): - print (recursion_spaces + ' ! cd {}'.format(cur_dir)) - print (recursion_spaces + ' ! call {} from {}'.format(path_to_run_script, run_script)) + logging.info (recursion_spaces + ' ! cd {}'.format(cur_dir)) + logging.info (recursion_spaces + ' ! call {} from {}'.format(path_to_run_script, run_script)) # Prepare env variables @@ -4641,9 +4641,9 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): import shutil shutil.copy(run_script, run_script_without_cm) - print ('================================================================================') - print ('Debug script to run without CM was recorded: {}'.format(run_script_without_cm)) - print ('================================================================================') + logging.info ('================================================================================') + logging.info ('Debug script to run without CM was recorded: {}'.format(run_script_without_cm)) + logging.info ('================================================================================') # Run final command cmd = os_info['run_local_bat_from_python'].replace('${bat_file}', run_script) @@ -4658,11 +4658,11 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): if os.path.isfile(pr): r = utils.load_txt(file_name = pr) if r['return'] == 0: - print ("========================================================") - print ("Print file {}:".format(pr)) - print ("") - print (r['string']) - print ("") + logging.info ("========================================================") + logging.info ("Print file {}:".format(pr)) + logging.info ("") + 
logging.info (r['string']) + logging.info ("") # Check where to report errors and failures @@ -4722,7 +4722,7 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): if postprocess != '' and customize_code is not None and postprocess in dir(customize_code): if not run_state.get('tmp_silent', False): - print (recursion_spaces+' ! call "{}" from {}'.format(postprocess, customize_code.__file__)) + logging.info (recursion_spaces+' ! call "{}" from {}'.format(postprocess, customize_code.__file__)) if len(posthook_deps)>0 and (postprocess == "postprocess"): r = script_automation._call_run_deps(posthook_deps, local_env_keys, local_env_keys_from_meta, env, state, const, const_state, @@ -4745,7 +4745,7 @@ def run_detect_version(customize_code, customize_common_input, recursion_spaces, import copy if verbose: - print (recursion_spaces+' - Running detect_version ...') + logging.info (recursion_spaces+' - Running detect_version ...') # Update env and state with const utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) @@ -4768,7 +4768,7 @@ def run_postprocess(customize_code, customize_common_input, recursion_spaces, en import copy if verbose: - print (recursion_spaces+' - Running postprocess ...') + logging.info (recursion_spaces+' - Running postprocess ...') # Update env and state with const utils.merge_dicts({'dict1':env, 'dict2':const, 'append_lists':True, 'append_unique':True}) @@ -5184,16 +5184,16 @@ def select_script_artifact(lst, text, recursion_spaces, can_skip, script_tags_st # If quiet, select 0 (can be sorted for determinism) if quiet: if verbose: - print (string1) - print ('') - print ('Selected default due to "quiet" mode') + logging.info (string1) + logging.info ('') + logging.info ('Selected default due to "quiet" mode') return 0 # Select 1 and proceed - print (string1) + logging.info (string1) - print ('') + logging.info ('') num = 0 for a in lst: @@ -5210,10 +5210,10 @@ def 
select_script_artifact(lst, text, recursion_spaces, can_skip, script_tags_st if version!='': x+=' (Version {})'.format(version) - print (x) + logging.info (x) num+=1 - print ('') + logging.info ('') s = 'Make your selection or press Enter for 0' if can_skip: @@ -5230,14 +5230,14 @@ def select_script_artifact(lst, text, recursion_spaces, can_skip, script_tags_st if selection <0: - print ('') - print (recursion_spaces+' Skipped') + logging.info ('') + logging.info(recursion_spaces+' Skipped') else: if selection >= num: selection = 0 - print ('') - print (recursion_spaces+' Selected {}: {}'.format(selection, lst[selection].path)) + logging.info ('') + logging.info (recursion_spaces+' Selected {}: {}'.format(selection, lst[selection].path)) return selection @@ -5452,4 +5452,4 @@ def dump_repro(repro_prefix, rr, run_state): r=auto.test({'x':'y'}) - print (r) + logging.info (r) diff --git a/automation/script/module_help.py b/automation/script/module_help.py index e27d756877..dbd8c73ac5 100644 --- a/automation/script/module_help.py +++ b/automation/script/module_help.py @@ -1,4 +1,5 @@ import os +import logging from cmind import utils # Pring help about script @@ -10,29 +11,29 @@ def print_help(i): if len(meta)==0 and path=='': return {'return':0} - print ('') - print ('Help for this CM script ({},{}):'.format(meta.get('alias',''), meta.get('uid',''))) + logging.info ('') + logging.info ('Help for this CM script ({},{}):'.format(meta.get('alias',''), meta.get('uid',''))) - print ('') - print ('Path to this automation recipe: {}'.format(path)) + logging.info ('') + logging.info ('Path to this automation recipe: {}'.format(path)) variations = meta.get('variations',{}) if len(variations)>0: - print ('') - print ('Available variations:') - print ('') + logging.info ('') + logging.info ('Available variations:') + logging.info ('') for v in sorted(variations): - print (' _'+v) + logging.info (' _'+v) input_mapping = meta.get('input_mapping', {}) if len(input_mapping)>0: - print 
('') - print ('Available flags mapped to environment variables:') - print ('') + logging.info ('') + logging.info ('Available flags mapped to environment variables:') + logging.info ('') for k in sorted(input_mapping): v = input_mapping[k] - print (' --{} -> --env.{}'.format(k,v)) + logging.info (' --{} -> --env.{}'.format(k,v)) input_description = meta.get('input_description', {}) if len(input_description)>0: @@ -46,9 +47,9 @@ def print_help(i): sorted_keys.append(k) - print ('') - print ('Available flags (Python API dict keys):') - print ('') + logging.info ('') + logging.info ('Available flags (Python API dict keys):') + logging.info ('') for k in all_keys: v = input_description[k] n = v.get('desc','') @@ -56,12 +57,12 @@ def print_help(i): x = ' --'+k if n!='': x+=' ({})'.format(n) - print (x) + logging.info (x) if len(sorted_keys)>0: - print ('') - print ('Main flags:') - print ('') + logging.info ('') + logging.info ('Main flags:') + logging.info ('') for k in sorted_keys: v = input_description[k] n = v.get('desc','') @@ -81,11 +82,11 @@ def print_help(i): if n!='': x+=' ({})'.format(n) - print (x) + logging.info (x) - print ('') + logging.info ('') x = input ('Would you like to see a Python API with a list of common keys/flags for all scripts including this one (y/N)? 
') x = x.strip().lower() diff --git a/automation/script/module_misc.py b/automation/script/module_misc.py index 6fe01bf900..2e9e8a8717 100644 --- a/automation/script/module_misc.py +++ b/automation/script/module_misc.py @@ -1,4 +1,5 @@ import os +import logging from cmind import utils # Meta deps @@ -158,7 +159,7 @@ def doc(i): meta = artifact.meta original_meta = artifact.original_meta - print ('Documenting {}'.format(path)) + logging.info ('Documenting {}'.format(path)) alias = meta.get('alias','') uid = meta.get('uid','') @@ -1428,7 +1429,7 @@ def dockerfile(i): docker_settings = state['docker'] if not docker_settings.get('run', True) and not i.get('docker_run_override', False): - print("docker.run set to False in _cm.json") + logging.error("docker.run set to False in _cm.json") continue '''run_config_path = os.path.join(script_path,'run_config.yml') if not os.path.exists(run_config_path): @@ -1575,8 +1576,8 @@ def dockerfile(i): if r['return'] > 0: return r - print ('') - print ("Dockerfile generated at " + dockerfile_path) + logging.info ('') + logging.info ("Dockerfile generated at " + dockerfile_path) return {'return':0} @@ -1783,7 +1784,7 @@ def docker(i): docker_settings = state['docker'] if not docker_settings.get('run', True) and not i.get('docker_run_override', False): - print("docker.run set to False in _cm.json") + logging.error("docker.run set to False in _cm.json") continue ''' if not docker_settings or not docker_settings.get('build') or not run_config.get('run_with_default_inputs'): @@ -1990,11 +1991,11 @@ def docker(i): final_run_cmd = run_cmd if docker_skip_run_cmd not in [ 'yes', True, 'True' ] else 'cm version' - print ('') - print ('CM command line regenerated to be used inside Docker:') - print ('') - print (final_run_cmd) - print ('') + logging.info ('') + logging.info ('CM command line regenerated to be used inside Docker:') + logging.info ('') + logging.info (final_run_cmd) + logging.info ('') docker_recreate_image = 'yes' if not 
norecreate_docker_image else 'no' @@ -2054,7 +2055,7 @@ def docker(i): if i.get('docker_save_script', ''): cm_docker_input['save_script'] = i['docker_save_script'] - print ('') + logging.info ('') r = self_module.cmind.access(cm_docker_input) if r['return'] > 0: @@ -2073,7 +2074,7 @@ def check_gh_token(i, docker_settings, quiet): if quiet: return rx - print ('') + logging.info ('') gh_token = input ('Enter GitHub token to access private CM repositories required for this CM script: ') if gh_token == '': diff --git a/automation/script/template-ae-python/main.py b/automation/script/template-ae-python/main.py index d851f1450f..e25378c0c1 100644 --- a/automation/script/template-ae-python/main.py +++ b/automation/script/template-ae-python/main.py @@ -1,10 +1,10 @@ import os - +import logging if __name__ == "__main__": - print ('') - print ('Main script:') - print ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) - print ('') + logging.info ('') + logging.info ('Main script:') + logging.info ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) + logging.info ('') exit(0) diff --git a/automation/script/template-python/customize.py b/automation/script/template-python/customize.py index 10214b87df..0586473398 100644 --- a/automation/script/template-python/customize.py +++ b/automation/script/template-python/customize.py @@ -1,10 +1,10 @@ from cmind import utils import os - +import logging def preprocess(i): - print ('') - print ('Preprocessing ...') + logging.info ('') + logging.info ('Preprocessing ...') os_info = i['os_info'] @@ -16,14 +16,14 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + logging.info (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) return {'return':0} def postprocess(i): - print ('') - print ('Postprocessing ...') + logging.info ('') + logging.info ('Postprocessing ...') env = i['env'] diff --git a/automation/script/template-python/main.py 
b/automation/script/template-python/main.py index 9ba7bb751d..6a4d292ab8 100644 --- a/automation/script/template-python/main.py +++ b/automation/script/template-python/main.py @@ -1,10 +1,10 @@ import os - +import logging if __name__ == "__main__": - print ('') - print ('Main script:') - print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) - print ('') + logging.info ('') + logging.info ('Main script:') + logging.info ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) + logging.info ('') exit(0) diff --git a/automation/script/template-pytorch/customize.py b/automation/script/template-pytorch/customize.py index 10214b87df..0586473398 100644 --- a/automation/script/template-pytorch/customize.py +++ b/automation/script/template-pytorch/customize.py @@ -1,10 +1,10 @@ from cmind import utils import os - +import logging def preprocess(i): - print ('') - print ('Preprocessing ...') + logging.info ('') + logging.info ('Preprocessing ...') os_info = i['os_info'] @@ -16,14 +16,14 @@ def preprocess(i): quiet = (env.get('CM_QUIET', False) == 'yes') - print (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) + logging.info (' ENV CM_VAR1: {}'.format(env.get('CM_VAR1',''))) return {'return':0} def postprocess(i): - print ('') - print ('Postprocessing ...') + logging.info ('') + logging.info ('Postprocessing ...') env = i['env'] diff --git a/automation/script/template-pytorch/main.py b/automation/script/template-pytorch/main.py index 3e49da450f..dbfd0b6739 100644 --- a/automation/script/template-pytorch/main.py +++ b/automation/script/template-pytorch/main.py @@ -1,15 +1,15 @@ import os - +import logging import torch if __name__ == "__main__": - print ('') - print ('Main script:') - print ('ENV CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) - print ('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA',''))) - print ('') - print ('PyTorch version: {}'.format(torch.__version__)) - print ('') + logging.info ('') + logging.info ('Main script:') + logging.info ('ENV 
CM_VAR1: {}'.format(os.environ.get('CM_VAR1',''))) + logging.info ('ENV USE_CUDA: {}'.format(os.environ.get('USE_CUDA',''))) + logging.info ('') + logging.info ('PyTorch version: {}'.format(torch.__version__)) + logging.info ('') exit(0) diff --git a/automation/utils/module.py b/automation/utils/module.py index df4898410d..334593b5b0 100644 --- a/automation/utils/module.py +++ b/automation/utils/module.py @@ -1,5 +1,5 @@ import os - +import logging from cmind.automation import Automation from cmind import utils @@ -47,7 +47,7 @@ def test(self, i): """ import json - print (json.dumps(i, indent=2)) + logging.info (json.dumps(i, indent=2)) return {'return':0} @@ -206,8 +206,8 @@ def download_file(self, i): if os.path.isfile(path_to_file): os.remove(path_to_file) - print ('Downloading to {}'.format(path_to_file)) - print ('') + logging.info ('Downloading to {}'.format(path_to_file)) + logging.info ('') # Download size = -1 @@ -255,7 +255,7 @@ def download_file(self, i): except Exception as e: return {'return':1, 'error':format(e)} - print ('') + logging.info ('') if size == 0: file_stats=os.stat(path_to_file) size = file_stats.st_size @@ -763,7 +763,7 @@ def list_files_recursively(self, i): s+=dir_path2+f - print (s) + logging.info (s) return {'return':0} @@ -786,7 +786,7 @@ def generate_secret(self, i): import secrets s = secrets.token_urlsafe(16) - print (s) + logging.info (s) return {'return':0, 'secret': s} @@ -883,7 +883,7 @@ def uid(self, i): r = utils.gen_uid() if console: - print (r['uid']) + logging.info (r['uid']) return r @@ -1034,7 +1034,7 @@ def print_yaml(self, i): meta = r['meta'] import json - print (json.dumps(meta, indent=2)) + logging.info (json.dumps(meta, indent=2)) return {'return':0} @@ -1065,6 +1065,6 @@ def print_json(self, i): meta = r['meta'] import json - print (json.dumps(meta, indent=2)) + logging.info (json.dumps(meta, indent=2)) return {'return':0} diff --git a/automation/utils/module_cfg.py b/automation/utils/module_cfg.py index 
145c388f2a..2a3355df43 100644 --- a/automation/utils/module_cfg.py +++ b/automation/utils/module_cfg.py @@ -1,7 +1,7 @@ import os import cmind import copy - +import logging base_path={} base_path_meta={} @@ -93,7 +93,7 @@ def load_cfg(i): r = cmind.utils.load_yaml_and_json(full_path_without_ext) if r['return']>0: - print ('Warning: problem loading file {}'.format(full_path)) + logging.warning ('Warning: problem loading file {}'.format(full_path)) else: meta = r['meta'] @@ -263,10 +263,10 @@ def select_cfg(i): select = 0 if len(selector) > 1: xtitle = ' ' + title if title!='' else '' - print ('') - print ('Available{} configurations:'.format(xtitle)) + logging.info ('') + logging.info ('Available{} configurations:'.format(xtitle)) - print ('') + logging.info ('') for s in range(0, len(selector)): ss = selector[s] @@ -277,7 +277,7 @@ def select_cfg(i): r = cmind.utils.load_yaml_and_json(full_path_without_ext) if r['return']>0: - print ('Warning: problem loading configuration file {}'.format(path)) + logging.warning ('Warning: problem loading configuration file {}'.format(path)) meta = r['meta'] ss['meta'] = meta @@ -292,11 +292,11 @@ def select_cfg(i): if x!='': x+=' ' x += '('+alias+')' - print ('{}) {}'.format(s, x)) + logging.info ('{}) {}'.format(s, x)) s+=1 - print ('') + logging.info ('') select = input ('Enter configuration number of press Enter for 0: ') if select.strip() == '': select = '0' diff --git a/script/app-image-classification-onnx-py/customize.py b/script/app-image-classification-onnx-py/customize.py index 0b2c7c0a47..1c8a7e1888 100644 --- a/script/app-image-classification-onnx-py/customize.py +++ b/script/app-image-classification-onnx-py/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): os_info = i['os_info'] @@ -43,7 +43,7 @@ def postprocess(i): with open(fjson, 'w', encoding='utf-8') as f: json.dump(data, f, ensure_ascii=False, indent=4) except Exception as e: - print ('CM warning: 
{}'.format(e)) + logging.warning ('CM warning: {}'.format(e)) try: @@ -51,15 +51,15 @@ def postprocess(i): with open(fyaml, 'w', encoding='utf-8') as f: yaml.dump(data, f) except Exception as e: - print ('CM warning: {}'.format(e)) + logging.warning ('CM warning: {}'.format(e)) top_classification = data.get('top_classification','') if env.get('CM_TMP_SILENT','')!='yes': if top_classification!='': - print ('') + logging.info ('') x = 'Top classification: {}'.format(top_classification) - print ('='*len(x)) - print (x) + logging.info ('='*len(x)) + logging.info (x) return {'return':0} diff --git a/script/app-image-classification-onnx-py/src/onnx_classify.py b/script/app-image-classification-onnx-py/src/onnx_classify.py index 00baaab149..eeda5f39a4 100644 --- a/script/app-image-classification-onnx-py/src/onnx_classify.py +++ b/script/app-image-classification-onnx-py/src/onnx_classify.py @@ -7,7 +7,7 @@ import numpy as np import time import json - +import logging from PIL import Image model_path = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH'] @@ -108,26 +108,26 @@ def load_a_batch(batch_filenames): else: (samples, channels, height, width) = model_input_shape -print("") -print("Data layout: {}".format(data_layout) ) -print("Input layers: {}".format([ str(x) for x in sess.get_inputs()])) -print("Output layers: {}".format([ str(x) for x in sess.get_outputs()])) -print("Input layer name: " + input_layer_name) -print("Expected input shape: {}".format(model_input_shape)) -print("Output layer name: " + output_layer_name) -print("Data normalization: {}".format(normalize_data_bool)) -print("Subtract mean: {}".format(subtract_mean_bool)) -print('Per-channel means to subtract: {}'.format(given_channel_means)) -print("Background/unlabelled classes to skip: {}".format(bg_class_offset)) -print("") +logging.info("") +logging.info("Data layout: {}".format(data_layout) ) +logging.info("Input layers: {}".format([ str(x) for x in sess.get_inputs()])) +logging.info("Output layers: 
{}".format([ str(x) for x in sess.get_outputs()])) +logging.info("Input layer name: " + input_layer_name) +logging.info("Expected input shape: {}".format(model_input_shape)) +logging.info("Output layer name: " + output_layer_name) +logging.info("Data normalization: {}".format(normalize_data_bool)) +logging.info("Subtract mean: {}".format(subtract_mean_bool)) +logging.info('Per-channel means to subtract: {}'.format(given_channel_means)) +logging.info("Background/unlabelled classes to skip: {}".format(bg_class_offset)) +logging.info("") starting_index = 1 start_time = time.time() for batch_idx in range(batch_count): - print ('') - print ("Batch {}/{}:".format(batch_idx+1, batch_count)) + logging.info ('') + logging.info ("Batch {}/{}:".format(batch_idx+1, batch_count)) batch_filenames = [ imagenet_path + '/' + "ILSVRC2012_val_00000{:03d}.JPEG".format(starting_index + batch_idx*batch_size + i) for i in range(batch_size) ] @@ -142,26 +142,26 @@ def load_a_batch(batch_filenames): cm_status = {'classifications':[]} - print ('') + logging.info ('') top_classification = '' for in_batch_idx in range(batch_size): softmax_vector = batch_predictions[in_batch_idx][bg_class_offset:] # skipping the background class on the left (if present) top5_indices = list(reversed(softmax_vector.argsort()))[:5] - print(' * ' + batch_filenames[in_batch_idx] + ' :') + logging.info(' * ' + batch_filenames[in_batch_idx] + ' :') for class_idx in top5_indices: if top_classification == '': top_classification = labels[class_idx] - print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) + logging.info("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) cm_status['classifications'].append({'class_idx':int(class_idx), 'softmax': float(softmax_vector[class_idx]), 'label':labels[class_idx]}) - print ('') - print ('Top classification: {}'.format(top_classification)) + logging.info ('') + logging.info ('Top classification: 
{}'.format(top_classification)) cm_status['top_classification'] = top_classification avg_time = (time.time() - start_time) / batch_count diff --git a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py index f3ee0b587d..ec676453f1 100644 --- a/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py +++ b/script/app-image-classification-torch-py/src/pytorch_classify_preprocessed.py @@ -5,7 +5,7 @@ import os import shutil import numpy as np - +import logging import torch import torchvision.models as models @@ -120,7 +120,7 @@ def load_and_resize_image(image_filepath, height, width): for batch_index in range(BATCH_COUNT): batch_number = batch_index+1 if FULL_REPORT or (batch_number % 10 == 0): - print("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) + logging.info("\nBatch {} of {}".format(batch_number, BATCH_COUNT)) begin_time = time.time() @@ -136,7 +136,7 @@ def load_and_resize_image(image_filepath, height, width): total_load_time += load_time images_loaded += BATCH_SIZE if FULL_REPORT: - print("Batch loaded in %fs" % (load_time)) + logging.info("Batch loaded in %fs" % (load_time)) # Classify one batch begin_time = time.time() @@ -150,7 +150,7 @@ def load_and_resize_image(image_filepath, height, width): classification_time = time.time() - begin_time if FULL_REPORT: - print("Batch classified in %fs" % (classification_time)) + logging.info("Batch classified in %fs" % (classification_time)) total_classification_time += classification_time # Remember first batch prediction time @@ -170,8 +170,8 @@ def load_and_resize_image(image_filepath, height, width): top5_indices = list(reversed(softmax_vector.argsort()))[:5] for class_idx in top5_indices: - print("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) - print("") + logging.info("\t{}\t{}\t{}".format(class_idx, softmax_vector[class_idx], labels[class_idx])) + 
logging.info("") test_time = time.time() - test_time_begin diff --git a/script/app-image-classification-tvm-onnx-py/src/classify.py b/script/app-image-classification-tvm-onnx-py/src/classify.py index 0eb299f2df..77f8eb18c7 100644 --- a/script/app-image-classification-tvm-onnx-py/src/classify.py +++ b/script/app-image-classification-tvm-onnx-py/src/classify.py @@ -7,7 +7,7 @@ import os import argparse import json - +import logging from PIL import Image import cv2 @@ -100,7 +100,7 @@ def run_case(dtype, image, target): # Load model model_path=os.environ.get('CM_ML_MODEL_FILE_WITH_PATH','') if model_path=='': - print ('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') + logging.info ('Error: environment variable CM_ML_MODEL_FILE_WITH_PATH is not defined') exit(1) opt = rt.SessionOptions() @@ -114,8 +114,8 @@ def run_case(dtype, image, target): inputs = [meta.name for meta in sess.get_inputs()] outputs = [meta.name for meta in sess.get_outputs()] - print (inputs) - print (outputs) + logging.info (inputs) + logging.info (outputs) @@ -128,9 +128,9 @@ def run_case(dtype, image, target): del sess # Load model via ONNX to be used with TVM - print ('') - print ('ONNX: load model ...') - print ('') + logging.info ('') + logging.info ('ONNX: load model ...') + logging.info ('') onnx_model = onnx.load(model_path) @@ -160,28 +160,28 @@ def run_case(dtype, image, target): input_shape = (1, 3, 224, 224) shape_dict = {inputs[0]: input_shape} - print ('') - print ('TVM: import model ...') - print ('') + logging.info ('') + logging.info ('TVM: import model ...') + logging.info ('') # Extra param: opset=12 mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True) - print ('') - print ('TVM: transform to static ...') - print ('') + logging.info ('') + logging.info ('TVM: transform to static ...') + logging.info ('') mod = relay.transform.DynamicToStatic()(mod) - print ('') - print ('TVM: apply extra optimizations ...') - print ('') + 
logging.info ('') + logging.info ('TVM: apply extra optimizations ...') + logging.info ('') # Padding optimization # Adds extra optimizations mod = relay.transform.FoldExplicitPadding()(mod) - print ('') - print ('TVM: build model ...') - print ('') + logging.info ('') + logging.info ('TVM: build model ...') + logging.info ('') executor=os.environ.get('MLPERF_TVM_EXECUTOR','graph') @@ -195,9 +195,9 @@ def run_case(dtype, image, target): params=params) lib = graph_module - print ('') - print ('TVM: init graph engine ...') - print ('') + logging.info ('') + logging.info ('TVM: init graph engine ...') + logging.info ('') sess = graph_executor.GraphModule(lib['default'](ctx)) @@ -211,9 +211,9 @@ def run_case(dtype, image, target): r_exec = vm_exec - print ('') - print ('TVM: init VM ...') - print ('') + logging.info ('') + logging.info ('TVM: init VM ...') + logging.info ('') sess = VirtualMachine(r_exec, ctx) @@ -247,15 +247,15 @@ def run_case(dtype, image, target): top5=[] atop5 = get_top5(output[1][0]) #.asnumpy()) - print ('') - print('Prediction Top1:', top1, synset[top1]) + logging.info ('') + logging.info('Prediction Top1: %s %s', top1, synset[top1]) - print ('') - print('Prediction Top5:') + logging.info ('') + logging.info('Prediction Top5:') for p in atop5: out=p[1]-1 name=synset[out] - print (' * {} {}'.format(out, name)) + logging.info (' * {} {}'.format(out, name)) ck_results={ 'prediction':synset[top1] @@ -273,7 +273,7 @@ def run_case(dtype, image, target): args = parser.parse_args() if args.image.strip().lower()=='': - print ('Please specify path to an image using CM_IMAGE environment variable!') + logging.info ('Please specify path to an image using CM_IMAGE environment variable!') exit(1) # set parameter diff --git a/script/app-image-corner-detection/customize.py b/script/app-image-corner-detection/customize.py index 88d65d1534..a21115a225 100644 --- a/script/app-image-corner-detection/customize.py +++ b/script/app-image-corner-detection/customize.py @@ -1,6
+1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -36,6 +36,6 @@ def preprocess(i): def postprocess(i): env = i['env'] - print(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR']) + logging.info(env['CM_OUTPUT'] + " generated in " + env['CM_RUN_DIR']) return {'return':0} diff --git a/script/app-loadgen-generic-python/customize.py b/script/app-loadgen-generic-python/customize.py index c8810dcd7b..816cad0016 100644 --- a/script/app-loadgen-generic-python/customize.py +++ b/script/app-loadgen-generic-python/customize.py @@ -3,7 +3,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): os_info = i['os_info'] @@ -83,9 +83,9 @@ def preprocess(i): env['CM_RUN_OPTS'] = run_opts - print ('') - print ('Assembled flags: {}'.format(run_opts)) - print ('') + logging.info ('') + logging.info ('Assembled flags: {}'.format(run_opts)) + logging.info ('') return {'return':0} diff --git a/script/app-loadgen-generic-python/src/backend_onnxruntime.py b/script/app-loadgen-generic-python/src/backend_onnxruntime.py index e95e467b9f..b4c7599cea 100644 --- a/script/app-loadgen-generic-python/src/backend_onnxruntime.py +++ b/script/app-loadgen-generic-python/src/backend_onnxruntime.py @@ -1,5 +1,5 @@ import typing - +import logging import numpy as np import onnx import onnxruntime as ort @@ -58,7 +58,7 @@ def __init__( self.session_options.inter_op_num_threads = inter_op_threads def create(self) -> Model: - print ('Loading model: {}'.format(self.model_path)) + logging.info ('Loading model: {}'.format(self.model_path)) # model = onnx.load(self.model_path) session_eps = [self.execution_provider] session = ort.InferenceSession( diff --git a/script/app-loadgen-generic-python/src/backend_pytorch.py b/script/app-loadgen-generic-python/src/backend_pytorch.py index 1fef350b44..033651630c 100644 --- a/script/app-loadgen-generic-python/src/backend_pytorch.py +++ 
b/script/app-loadgen-generic-python/src/backend_pytorch.py @@ -4,7 +4,7 @@ import importlib import os import psutil - +import logging import utils import numpy as np @@ -23,10 +23,10 @@ def __init__(self, session): def predict(self, input: ModelInput): - print ('') + logging.info ('') utils.print_host_memory_use('Host memory used') - print ('Running inference ...') + logging.info ('Running inference ...') with torch.no_grad(): output = self.session(input) @@ -56,8 +56,8 @@ def __init__( def create(self) -> Model: - print ('') - print ('Loading model: {}'.format(self.model_path)) + logging.info ('') + logging.info ('Loading model: {}'.format(self.model_path)) if self.execution_provider == 'CPUExecutionProvider': torch_provider = 'cpu' @@ -86,8 +86,8 @@ def create(self) -> Model: if not os.path.isfile(cm_model_module): raise Exception('cm.py interface for a PyTorch model was not found in {}'.format(self.model_code)) - print ('') - print ('Collective Mind Connector for the model found: {}'.format(cm_model_module)) + logging.info ('') + logging.info ('Collective Mind Connector for the model found: {}'.format(cm_model_module)) # Load CM interface for the model @@ -98,7 +98,7 @@ def create(self) -> Model: # Init model if len(self.model_cfg)>0: - print ('Model cfg: {}'.format(self.model_cfg)) + logging.info ('Model cfg: {}'.format(self.model_cfg)) r = model_module.model_init(checkpoint, self.model_cfg) if r['return']>0: diff --git a/script/app-loadgen-generic-python/src/main.py b/script/app-loadgen-generic-python/src/main.py index 0055ecaf2f..fc661332a1 100644 --- a/script/app-loadgen-generic-python/src/main.py +++ b/script/app-loadgen-generic-python/src/main.py @@ -4,7 +4,7 @@ import os import re import typing - +import logging import mlperf_loadgen import psutil @@ -38,7 +38,7 @@ def main( loadgen_duration_sec: float ): - print ('=====================================================================') + logging.info 
('=====================================================================') if backend == 'onnxruntime': from backend_onnxruntime import XModelFactory @@ -135,7 +135,7 @@ def main( harness.issue_query, harness.flush_queries ) - print ('=====================================================================') + logging.info ('=====================================================================') logger.info("Test Started") mlperf_loadgen.StartTestWithLogSettings( @@ -143,7 +143,7 @@ def main( ) logger.info("Test Finished") - print ('=====================================================================') + logging.info ('=====================================================================') # Parse output file output_summary = {} @@ -159,11 +159,11 @@ def main( mlperf_loadgen.DestroySUT(system_under_test) mlperf_loadgen.DestroyQSL(query_sample_libary) logger.info("Test Completed") - print ('=====================================================================') + logging.info ('=====================================================================') if __name__ == "__main__": - print ('') + logging.info ('') logging.basicConfig( level=logging.DEBUG, diff --git a/script/app-loadgen-generic-python/src/utils.py b/script/app-loadgen-generic-python/src/utils.py index 8c182650c5..bec4f39612 100644 --- a/script/app-loadgen-generic-python/src/utils.py +++ b/script/app-loadgen-generic-python/src/utils.py @@ -2,7 +2,7 @@ import os import psutil - +import logging def print_host_memory_use(text=''): pid = os.getpid() diff --git a/script/app-mlperf-inference-dummy/customize.py b/script/app-mlperf-inference-dummy/customize.py index 36f310babc..fb878c6f4a 100644 --- a/script/app-mlperf-inference-dummy/customize.py +++ b/script/app-mlperf-inference-dummy/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): os_info = i['os_info'] @@ -25,8 +25,8 @@ def preprocess(i): return r run_cmd = r['run_cmd'] run_dir = r ['run_dir'] - 
print(run_cmd) - print(run_dir) + logging.info(run_cmd) + logging.info(run_dir) return {'return':1, 'error': 'Run command needs to be tested!'} def get_run_cmd(model, i): diff --git a/script/app-mlperf-inference-mlcommons-cpp/customize.py b/script/app-mlperf-inference-mlcommons-cpp/customize.py index ebe8cf7d97..7c18442214 100644 --- a/script/app-mlperf-inference-mlcommons-cpp/customize.py +++ b/script/app-mlperf-inference-mlcommons-cpp/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os import shutil +import logging def preprocess(i): @@ -11,9 +12,9 @@ def preprocess(i): meta = i['meta'] if os_info['platform'] == 'windows': - print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') - print ('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') - print ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + logging.info ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') + logging.warning ('WARNING: this script was not thoroughly tested on Windows and compilation may fail - please help us test and improve it!') + logging.info ('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~') # # Currently support only LLVM on Windows # print ('# Forcing LLVM on Windows') # r = automation.update_deps({'deps':meta['post_deps'], 'update_deps':{'compile-program': {'adr':{'compiler':{'tags':'llvm'}}}}}) diff --git a/script/app-mlperf-inference-mlcommons-python/customize.py b/script/app-mlperf-inference-mlcommons-python/customize.py index b172293c0f..24503f4115 100644 --- a/script/app-mlperf-inference-mlcommons-python/customize.py +++ b/script/app-mlperf-inference-mlcommons-python/customize.py @@ -3,6 +3,7 @@ import json 
import shutil import subprocess +import logging def preprocess(i): @@ -63,7 +64,7 @@ def preprocess(i): if env.get('CM_MLPERF_LOADGEN_QUERY_COUNT','') != '' and not env.get('CM_TMP_IGNORE_MLPERF_QUERY_COUNT', False) and (env['CM_MLPERF_LOADGEN_MODE'] == 'accuracy' or 'gptj' in env['CM_MODEL']) and env.get('CM_MLPERF_RUN_STYLE','') != "valid": env['CM_MLPERF_LOADGEN_EXTRA_OPTIONS'] += " --count " + env['CM_MLPERF_LOADGEN_QUERY_COUNT'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") if 'CM_MLPERF_CONF' not in env: env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") diff --git a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py index 705f1e3539..3e2ff4606c 100644 --- a/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py +++ b/script/app-mlperf-inference-mlcommons-python/nvidia/retinanet.py @@ -25,6 +25,7 @@ import random import time import pycuda +import logging from PIL import Image from importlib import import_module from typing import Dict, Tuple, List, Optional @@ -155,7 +156,7 @@ def __init__(self, engine_file, batch_size, precision, onnx_path, trt.init_libnvinfer_plugins(self.logger, "") if self.onnx_path is not None and not skip_engine_build: - print(f"Creating engines from onnx: {self.onnx_path}") + logging.info(f"Creating engines from onnx: {self.onnx_path}") self.create_trt_engine() else: if not os.path.exists(engine_file): @@ -264,14 +265,14 @@ def run_openimage(self, num_samples=8): image_ids = cocoGt.getImgIds() cat_ids = cocoGt.getCatIds() num_images = min(num_samples, len(image_ids)) - print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + logging.info(f"Total number of images: {len(image_ids)}, number of categories: 
{len(cat_ids)}, running num_images: {num_images}") detections = [] batch_idx = 0 for image_idx in range(0, num_images, self.batch_size): # Print Progress if batch_idx % 20 == 0: - print(f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") + logging.info(f"Processing batch: {batch_idx} image: {image_idx}/{num_images}") end_idx = min(image_idx + self.batch_size, num_images) imgs = [] @@ -409,7 +410,7 @@ def run_openimage(self, num_samples=8): image_ids = cocoGt.getImgIds() cat_ids = cocoGt.getCatIds() num_images = min(num_samples, len(image_ids)) - print(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") + logging.info(f"Total number of images: {len(image_ids)}, number of categories: {len(cat_ids)}, running num_images: {num_images}") coco_detections = [] for image_idx in range(0, num_images, self.batch_size): @@ -427,7 +428,7 @@ def run_openimage(self, num_samples=8): for idx in range(image_idx, end_idx): image_id = image_ids[idx] tensor = load_img_pytorch(os.path.join(self.image_dir, cocoGt.imgs[image_id]["file_name"]), do_transform=True).numpy() - print(tensor.shape) + logging.info(tensor.shape) img.append(tensor) img = np.ascontiguousarray(np.stack(img), dtype=np.float32) diff --git a/script/app-mlperf-inference-qualcomm/customize.py b/script/app-mlperf-inference-qualcomm/customize.py index fc858d9539..c2b701a0aa 100644 --- a/script/app-mlperf-inference-qualcomm/customize.py +++ b/script/app-mlperf-inference-qualcomm/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os import shutil +import logging def preprocess(i): @@ -22,7 +23,7 @@ def preprocess(i): kilt_root = env['CM_KILT_CHECKOUT_PATH'] - print(f"Harness Root: {kilt_root}") + logging.info(f"Harness Root: {kilt_root}") source_files = [] env['CM_SOURCE_FOLDER_PATH'] = env['CM_KILT_CHECKOUT_PATH'] @@ -119,7 +120,7 @@ def preprocess(i): elif env['CM_MLPERF_DEVICE'] == 'qaic': source_files.append(os.path.join(kilt_root, 
"devices", "qaic", "api", "master", "QAicInfApi.cpp")) - print(f"Compiling the source files: {source_files}") + logging.info(f"Compiling the source files: {source_files}") env['CM_CXX_SOURCE_FILES'] = ";".join(source_files) env['+ CXXFLAGS'].append("-std=c++17") diff --git a/script/app-mlperf-inference-redhat/customize.py b/script/app-mlperf-inference-redhat/customize.py index 36d0bafb88..bd54755d39 100644 --- a/script/app-mlperf-inference-redhat/customize.py +++ b/script/app-mlperf-inference-redhat/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os import shutil +import logging def preprocess(i): @@ -25,8 +26,8 @@ def preprocess(i): return r run_cmd = r['run_cmd'] run_dir = r ['run_dir'] - print(run_cmd) - print(run_dir) + logging.info(run_cmd) + logging.info(run_dir) return {'return':1, 'error': 'Run command needs to be tested'} def get_run_cmd(model, i): diff --git a/script/app-mlperf-inference/build_dockerfiles.py b/script/app-mlperf-inference/build_dockerfiles.py index 10579d33ea..a923596f1c 100644 --- a/script/app-mlperf-inference/build_dockerfiles.py +++ b/script/app-mlperf-inference/build_dockerfiles.py @@ -1,6 +1,7 @@ import cmind import os import pathlib +import logging current_file_path = pathlib.Path(__file__).parent.resolve() docker_os = { "ubuntu": ["18.04","20.04","22.04"], @@ -90,9 +91,9 @@ } r = cmind.access(cm_docker_input) if r['return'] > 0: - print(r) + logging.info(r) exit(1) - print ('') - print ("Dockerfile generated at " + dockerfile_path) + logging.info ('') + logging.info ("Dockerfile generated at " + dockerfile_path) diff --git a/script/app-mlperf-inference/customize.py b/script/app-mlperf-inference/customize.py index 1832908280..13a2c7cc90 100644 --- a/script/app-mlperf-inference/customize.py +++ b/script/app-mlperf-inference/customize.py @@ -9,7 +9,7 @@ import platform import sys import mlperf_utils - +import logging def preprocess(i): env = i['env'] @@ -132,7 +132,7 @@ def postprocess(i): pattern["Offline"] = "Samples per 
second: (.*)\n" pattern["SingleStream"] = "Mean latency \(ns\)\s*:(.*)" pattern["MultiStream"] = "Mean latency \(ns\)\s*:(.*)" - print("\n") + logging.info("\n") with open("mlperf_log_summary.txt", "r") as fp: summary = fp.read() @@ -152,8 +152,8 @@ def postprocess(i): sut_config[model_full_name][scenario] = {} sut_config[model_full_name][scenario][metric] = value - print(f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}") - print(f"New config stored in {sut_config_path}") + logging.info(f"SUT: {sut_name}, model: {model_full_name}, scenario: {scenario}, {metric} updated as {value}") + logging.info(f"New config stored in {sut_config_path}") with open(sut_config_path, "w") as f: yaml.dump(sut_config, f) @@ -184,8 +184,8 @@ def postprocess(i): state['app_mlperf_inference_log_summary'][y[0].strip().lower()]=y[1].strip() if env.get("CM_MLPERF_PRINT_SUMMARY", "").lower() not in [ "no", "0", "false"]: - print("\n") - print(mlperf_log_summary) + logging.info("\n") + logging.info(mlperf_log_summary) with open ("measurements.json", "w") as fp: json.dump(measurements, fp, indent=2) @@ -359,7 +359,7 @@ def postprocess(i): SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") cmd = env['CM_PYTHON_BIN_WITH_PATH'] + " " + SCRIPT_PATH + " -r " + RESULT_DIR + " -c " + COMPLIANCE_DIR + " -o "+ OUTPUT_DIR - print(cmd) + logging.info(cmd) os.system(cmd) if test == "TEST01": @@ -376,7 +376,7 @@ def postprocess(i): ACCURACY_DIR = os.path.join(RESULT_DIR, "accuracy") if not os.path.exists(ACCURACY_DIR): - print("Accuracy run not yet completed") + logging.info("Accuracy run not yet completed") return {'return':1, 'error': 'TEST01 needs accuracy run to be completed first'} cmd = "cd " + TEST01_DIR + " && bash " + SCRIPT_PATH + " " + os.path.join(ACCURACY_DIR, "mlperf_log_accuracy.json") + " " + \ @@ -391,7 +391,7 @@ def postprocess(i): data = file.read().replace('\n', '\t') if 'TEST 
PASS' not in data: - print("\nDeterministic TEST01 failed... Trying with non-determinism.\n") + logging.info("\nDeterministic TEST01 failed... Trying with non-determinism.\n") # #Normal test failed, trying the check with non-determinism CMD = "cd "+ ACCURACY_DIR+" && "+ env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + accuracy_filepath + accuracy_log_file_option_name + \ @@ -414,7 +414,7 @@ def postprocess(i): state['cm-mlperf-inference-results'][state['CM_SUT_CONFIG_NAME']][model][scenario][test] = "passed" if is_valid else "failed" else: - print(test) + logging.info(test) if state.get('mlperf-inference-implementation') and state['mlperf-inference-implementation'].get('version_info'): diff --git a/script/app-mlperf-training-nvidia/customize.py b/script/app-mlperf-training-nvidia/customize.py index 3c5fdf6d8a..a24f8c96e9 100644 --- a/script/app-mlperf-training-nvidia/customize.py +++ b/script/app-mlperf-training-nvidia/customize.py @@ -3,6 +3,7 @@ import json import shutil import subprocess +import logging def preprocess(i): @@ -35,7 +36,7 @@ def preprocess(i): else: env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + logging.info("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") NUM_THREADS = env['CM_NUM_THREADS'] diff --git a/script/app-mlperf-training-reference/customize.py b/script/app-mlperf-training-reference/customize.py index f7c77bc55a..5fa07dafed 100644 --- a/script/app-mlperf-training-reference/customize.py +++ b/script/app-mlperf-training-reference/customize.py @@ -35,7 +35,7 @@ def preprocess(i): else: env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - print("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + logging.info("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") NUM_THREADS = env['CM_NUM_THREADS'] diff --git 
a/script/app-stable-diffusion-onnx-py/process.py b/script/app-stable-diffusion-onnx-py/process.py index 0f759089b7..06f215400c 100644 --- a/script/app-stable-diffusion-onnx-py/process.py +++ b/script/app-stable-diffusion-onnx-py/process.py @@ -1,7 +1,7 @@ # https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/onnx import os - +import logging from optimum.onnxruntime import ORTStableDiffusionPipeline output = os.environ.get('CM_APP_STABLE_DIFFUSION_ONNX_PY_OUTPUT','') @@ -13,7 +13,7 @@ cm_model_path = os.environ.get('CM_ML_MODEL_PATH','') if cm_model_path == '': - print ('Error: CM_ML_MODEL_PATH env is not defined') + logging.info ('Error: CM_ML_MODEL_PATH env is not defined') exit(1) device = os.environ.get('CM_DEVICE','') @@ -24,11 +24,11 @@ if text == '': text = "a photo of an astronaut riding a horse on mars" -print ('') -print ('Generating imaged based on "{}"'.format(text)) +logging.info ('') +logging.info ('Generating image based on "{}"'.format(text)) image = pipeline(text).images[0] image.save(f) -print ('Image recorded to "{}"'.format(f)) +logging.info ('Image recorded to "{}"'.format(f)) diff --git a/script/benchmark-any-mlperf-inference-implementation/customize.py b/script/benchmark-any-mlperf-inference-implementation/customize.py index ae6462118b..8e2002788a 100644 --- a/script/benchmark-any-mlperf-inference-implementation/customize.py +++ b/script/benchmark-any-mlperf-inference-implementation/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -133,7 +133,7 @@ def preprocess(i): with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: f.write(run_script_content) - print(run_script_content) + logging.info(run_script_content) run_script_input = i['run_script_input'] r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':run_file_name}) diff --git a/script/benchmark-program/customize.py
b/script/benchmark-program/customize.py index 5fe34ec09f..93b4d4c729 100644 --- a/script/benchmark-program/customize.py +++ b/script/benchmark-program/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] env = i['env'] @@ -48,15 +48,15 @@ def preprocess(i): env['CM_RUN_CMD'] += " 2>&1 ; echo \$? > exitstatus | tee " + q+ os.path.join(logs_dir, "console.out") + q # Print info - print ('***************************************************************************') - print ('CM script::benchmark-program/run.sh') - print ('') - print ('Run Directory: {}'.format(env.get('CM_RUN_DIR',''))) + logging.info ('***************************************************************************') + logging.info ('CM script::benchmark-program/run.sh') + logging.info ('') + logging.info ('Run Directory: {}'.format(env.get('CM_RUN_DIR',''))) - print ('') - print ('CMD: {}'.format(env.get('CM_RUN_CMD',''))) + logging.info ('') + logging.info ('CMD: {}'.format(env.get('CM_RUN_CMD',''))) - print ('') + logging.info ('') return {'return':0} diff --git a/script/build-docker-image/customize.py b/script/build-docker-image/customize.py index 90684e036d..4726d80dba 100644 --- a/script/build-docker-image/customize.py +++ b/script/build-docker-image/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os from os.path import exists - +import logging def preprocess(i): os_info = i['os_info'] @@ -77,12 +77,12 @@ def preprocess(i): CMD = ''.join(XCMD) - print ('================================================') - print ('CM generated the following Docker build command:') - print ('') - print (CMD) + logging.info ('================================================') + logging.info ('CM generated the following Docker build command:') + logging.info ('') + logging.info (CMD) - print ('') + logging.info ('') env['CM_DOCKER_BUILD_CMD'] = CMD @@ -115,15 +115,15 @@ def postprocess(i): with open(dockerfile_path + '.build.bat', 'w') as 
f: f.write(PCMD + '\n') - print ('================================================') - print ('CM generated the following Docker push command:') - print ('') - print (PCMD) + logging.info ('================================================') + logging.info ('CM generated the following Docker push command:') + logging.info ('') + logging.info (PCMD) - print ('') + logging.info ('') r = os.system(PCMD) - print ('') + logging.info ('') if r>0: return {'return':1, 'error':'pushing to Docker Hub failed'} diff --git a/script/calibrate-model-for.qaic/customize.py b/script/calibrate-model-for.qaic/customize.py index 62c4dbdbae..12b2be1dc1 100644 --- a/script/calibrate-model-for.qaic/customize.py +++ b/script/calibrate-model-for.qaic/customize.py @@ -2,7 +2,7 @@ import os import sys import yaml - +import logging def preprocess(i): os_info = i['os_info'] @@ -25,7 +25,7 @@ def preprocess(i): return r cmd = r['cmd'] - print("Profiling from "+ os.getcwd()) + logging.info("Profiling from "+ os.getcwd()) env['CM_RUN_CMD'] = cmd diff --git a/script/compile-model-for.qaic/customize.py b/script/compile-model-for.qaic/customize.py index 1e178f1897..7bebc5fa23 100644 --- a/script/compile-model-for.qaic/customize.py +++ b/script/compile-model-for.qaic/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -20,16 +20,16 @@ def preprocess(i): return r cmd = r['cmd'] - print("Compiling from "+ os.getcwd()) + logging.info("Compiling from "+ os.getcwd()) env['CM_QAIC_MODEL_FINAL_COMPILATION_CMD'] = cmd env['CM_RUN_CMD'] = cmd else: import shutil - print("Creating cache entry from " + env['CM_REGISTER_CACHE'] + " to " + os.getcwd()) + logging.info("Creating cache entry from " + env['CM_REGISTER_CACHE'] + " to " + os.getcwd()) r = shutil.copytree(env['CM_REGISTER_CACHE'], os.path.join(os.getcwd(), "elfs")) - print(r) + logging.info(r) return {'return':0} diff --git a/script/compile-program/customize.py 
b/script/compile-program/customize.py index 73a3eeb82b..d86200398a 100644 --- a/script/compile-program/customize.py +++ b/script/compile-program/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -17,7 +17,7 @@ def preprocess(i): # If windows, need to extend it more ... if os_info['platform'] == 'windows' and env.get('CM_COMPILER_FAMILY','')!='LLVM': - print ("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows") + logging.warning ("WARNING: compile-program script should be extended to support flags for non-LLVM compilers on Windows") return {'return':0} LDFLAGS = env.get('+ LDFLAGS', []) diff --git a/script/create-custom-cache-entry/customize.py b/script/create-custom-cache-entry/customize.py index 8d2d31db32..73d92e70c4 100644 --- a/script/create-custom-cache-entry/customize.py +++ b/script/create-custom-cache-entry/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): # CM script internal variables @@ -9,7 +9,7 @@ def preprocess(i): extra_cache_tags = [] if env.get('CM_EXTRA_CACHE_TAGS','').strip() == '': - print ('') + logging.info ('') extra_cache_tags_str = input('Enter extra tags for the custom CACHE entry separated by comma: ') extra_cache_tags = extra_cache_tags_str.strip().split(',') diff --git a/script/create-fpgaconvnet-app-tinyml/customize.py b/script/create-fpgaconvnet-app-tinyml/customize.py index c139e3a476..ffdfabf213 100644 --- a/script/create-fpgaconvnet-app-tinyml/customize.py +++ b/script/create-fpgaconvnet-app-tinyml/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -31,7 +31,7 @@ def postprocess(i): network = env['CM_TINY_NETWORK_NAME'] json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json") if os.path.exists(json_location): - print(f"JSON configuration 
file for {network} created at {json_location}") + logging.info(f"JSON configuration file for {network} created at {json_location}") else: return {'return':1, 'error': "JSON configuration file generation failed"} diff --git a/script/create-fpgaconvnet-config-tinyml/customize.py b/script/create-fpgaconvnet-config-tinyml/customize.py index 8590890bb9..e1e5bbf572 100644 --- a/script/create-fpgaconvnet-config-tinyml/customize.py +++ b/script/create-fpgaconvnet-config-tinyml/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -43,7 +43,7 @@ def postprocess(i): json_location = os.path.join(env['CM_RUN_DIR'], env['CM_TINY_NETWORK_NAME'] + ".json") if os.path.exists(json_location): - print(f"JSON configuration file for {network} created at {json_location}") + logging.info(f"JSON configuration file for {network} created at {json_location}") else: return {'return':1, 'error': "JSON configuration file generation failed"} diff --git a/script/create-patch/customize.py b/script/create-patch/customize.py index 2990d29ff0..be484f4708 100644 --- a/script/create-patch/customize.py +++ b/script/create-patch/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -35,11 +35,11 @@ def preprocess(i): cmd = 'diff -Naur {} {} {} > patch.patch'.format(x_exclude, old_dir, new_dir) if not quiet: - print ('') - print ('Running command:') - print ('') - print (cmd) - print ('') + logging.info ('') + logging.info ('Running command:') + logging.info ('') + logging.info (cmd) + logging.info ('') os.system(cmd) diff --git a/script/detect-cpu/customize.py b/script/detect-cpu/customize.py index 7a5586667a..310762b3b2 100644 --- a/script/detect-cpu/customize.py +++ b/script/detect-cpu/customize.py @@ -1,5 +1,6 @@ from cmind import utils import os +import logging lscpu_out = 'tmp-lscpu.out' @@ -34,7 +35,7 @@ def postprocess(i): f = 'tmp-systeminfo.csv' if not 
os.path.isfile(f): - print ('WARNING: {} file was not generated!'.format(f)) + logging.warning ('WARNING: {} file was not generated!'.format(f)) else: keys = {} j = 0 @@ -98,7 +99,7 @@ def postprocess(i): ############################################################################### # Linux if not os.path.isfile(lscpu_out): - print ('WARNING: lscpu.out file was not generated!') + logging.warning ('WARNING: lscpu.out file was not generated!') # Currently ignore this error though probably should fail? # But need to check that is supported on all platforms. diff --git a/script/download-file/customize.py b/script/download-file/customize.py index 78362181db..e4308acc33 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import hashlib - +import logging def preprocess(i): os_info = i['os_info'] @@ -26,16 +26,16 @@ def preprocess(i): env['CM_DOWNLOAD_FILENAME'] = filepath if not quiet: - print ('') - print ('Using local file: {}'.format(filepath)) + logging.info ('') + logging.info ('Using local file: {}'.format(filepath)) else: url = env.get('CM_DOWNLOAD_URL','') if url=='': return {'return':1, 'error': 'please specify URL using --url={URL} or --env.CM_DOWNLOAD_URL={URL}'} - print ('') - print ('Downloading from {}'.format(url)) + logging.info ('') + logging.info ('Downloading from {}'.format(url)) if '&' in url and tool != "cmutil": if os_info['platform'] == 'windows': @@ -74,7 +74,7 @@ def preprocess(i): env['CM_DOWNLOAD_FILENAME'] = "index.html" if tool == "cmutil": - print ('') + logging.info ('') cm = automation.cmind for i in range(1,5): @@ -87,7 +87,7 @@ def preprocess(i): url = env.get('CM_DOWNLOAD_URL'+str(i),'') if url == '': break - print(f"Download from {oldurl} failed, trying from {url}") + logging.info(f"Download from {oldurl} failed, trying from {url}") if r['return']>0: return r diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index
a8da7ec0d5..98332a50a9 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import hashlib - +import logging def preprocess(i): variation_tags = i.get('variation_tags',[]) @@ -113,10 +113,10 @@ def preprocess(i): env.get('CM_EXTRACT_TOOL_EXTRA_OPTIONS', '') + \ ' ' + env.get('CM_EXTRACT_TOOL_OPTIONS', '')+ ' '+ x + filename + x - print ('') - print ('Current directory: {}'.format(os.getcwd())) - print ('Command line: "{}"'.format(env['CM_EXTRACT_CMD'])) - print ('') + logging.info ('') + logging.info ('Current directory: {}'.format(os.getcwd())) + logging.info ('Command line: "{}"'.format(env['CM_EXTRACT_CMD'])) + logging.info ('') final_file = env.get('CM_EXTRACT_EXTRACTED_FILENAME', '') diff --git a/script/generate-mlperf-inference-submission/customize.py b/script/generate-mlperf-inference-submission/customize.py index c29b34e241..f864018d77 100644 --- a/script/generate-mlperf-inference-submission/customize.py +++ b/script/generate-mlperf-inference-submission/customize.py @@ -6,6 +6,7 @@ import sys from tabulate import tabulate import mlperf_utils +import logging def preprocess(i): return {'return': 0} @@ -37,17 +38,17 @@ def generate_submission(i): submission_dir = env.get('CM_MLPERF_INFERENCE_SUBMISSION_DIR', '') if env.get('CM_MLPERF_CLEAN_SUBMISSION_DIR','')!='': - print ('=================================================') - print ('Cleaning {} ...'.format(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])) + logging.info ('=================================================') + logging.info ('Cleaning {} ...'.format(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR'])) if os.path.exists(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']): shutil.rmtree(env['CM_MLPERF_INFERENCE_SUBMISSION_DIR']) - print ('=================================================') + logging.info ('=================================================') if not os.path.isdir(submission_dir): os.makedirs(submission_dir) - print('* MLPerf 
inference submission dir: {}'.format(submission_dir)) - print('* MLPerf inference results dir: {}'.format(results_dir)) + logging.info('* MLPerf inference submission dir: {}'.format(submission_dir)) + logging.info('* MLPerf inference results dir: {}'.format(results_dir)) results = [f for f in os.listdir(results_dir) if not os.path.isfile(os.path.join(results_dir, f))] system_meta_default = state['CM_SUT_META'] @@ -72,7 +73,7 @@ def generate_submission(i): if division not in ['open','closed']: return {'return':1, 'error':'"division" must be "open" or "closed"'} - print('* MLPerf inference division: {}'.format(division)) + logging.info('* MLPerf inference division: {}'.format(division)) path_submission_root = submission_dir path_submission_division=os.path.join(path_submission_root, division) @@ -87,7 +88,7 @@ def generate_submission(i): submitter = system_meta_default['submitter'] env['CM_MLPERF_SUBMITTER'] = submitter - print('* MLPerf inference submitter: {}'.format(submitter)) + logging.info('* MLPerf inference submitter: {}'.format(submitter)) if 'Collective' not in system_meta_default.get('sw_notes'): system_meta['sw_notes'] = "Automated by MLCommons CM v{}. 
".format(cmind.__version__) + system_meta_default['sw_notes'] @@ -119,19 +120,19 @@ def generate_submission(i): framework_version = parts[4] run_config = parts[5] - print('* System: {}'.format(system)) - print('* Implementation: {}'.format(implementation)) - print('* Device: {}'.format(device)) - print('* Framework: {}'.format(framework)) - print('* Framework Version: {}'.format(framework_version)) - print('* Run Config: {}'.format(run_config)) + logging.info('* System: {}'.format(system)) + logging.info('* Implementation: {}'.format(implementation)) + logging.info('* Device: {}'.format(device)) + logging.info('* Framework: {}'.format(framework)) + logging.info('* Framework Version: {}'.format(framework_version)) + logging.info('* Run Config: {}'.format(run_config)) new_res = system + "-" + "-".join(parts[1:]) # Override framework and framework versions from the folder name system_meta_default['framework'] = framework + " " + framework_version else: - print(parts) + logging.info(parts) return {'return': 1} result_path = os.path.join(results_dir, res) platform_prefix = inp.get('platform_prefix', '') @@ -168,7 +169,7 @@ def generate_submission(i): with open(os.path.join(submission_code_path, "README.md"), mode='w') as f: f.write("TBD") #create an empty README - print('* MLPerf inference model: {}'.format(model)) + logging.info('* MLPerf inference model: {}'.format(model)) for scenario in scenarios: results[model][scenario] = {} result_scenario_path = os.path.join(result_model_path, scenario) @@ -178,11 +179,11 @@ def generate_submission(i): if duplicate and scenario=='singlestream': if not os.path.exists(os.path.join(result_model_path, "offline")): - print('Duplicating results from {} to offline:'.format(scenario)) + logging.info('Duplicating results from {} to offline:'.format(scenario)) shutil.copytree(result_scenario_path, os.path.join(result_model_path, "offline")) scenarios.append("offline") if not os.path.exists(os.path.join(result_model_path, "multistream")): 
- print('Duplicating results from {} to multistream:'.format(scenario)) + logging.info('Duplicating results from {} to multistream:'.format(scenario)) shutil.copytree(result_scenario_path, os.path.join(result_model_path, "multistream")) scenarios.append("multistream") @@ -299,7 +300,7 @@ def generate_submission(i): shutil.copytree(os.path.join(result_mode_path, "images"), os.path.join(submission_results_path, "images")) for f in files: - print(' * ' + f) + logging.info(' * ' + f) p_target = os.path.join(submission_results_path, f) shutil.copy(os.path.join(result_mode_path, f), p_target) @@ -322,7 +323,7 @@ def generate_submission(i): result_table, headers = mlperf_utils.get_result_table(results) - print(tabulate(result_table, headers = headers, tablefmt="pretty")) + logging.info(tabulate(result_table, headers = headers, tablefmt="pretty")) sut_readme_file = os.path.join(measurement_path, "README.md") with open(sut_readme_file, mode='w') as f: f.write(tabulate(result_table, headers = headers, tablefmt="github")) diff --git a/script/generate-mlperf-inference-user-conf/customize.py b/script/generate-mlperf-inference-user-conf/customize.py index 22684b5710..f8c0a157a3 100644 --- a/script/generate-mlperf-inference-user-conf/customize.py +++ b/script/generate-mlperf-inference-user-conf/customize.py @@ -5,7 +5,7 @@ import subprocess import cmind as cm import sys - +import logging def preprocess(i): @@ -31,7 +31,7 @@ def preprocess(i): env['CM_MLPERF_LOADGEN_SCENARIO'] = "Offline" if 'CM_MLPERF_LOADGEN_MODE' not in env: - print("\nNo mode given. Using accuracy as default\n") + logging.info("\nNo mode given. 
Using accuracy as default\n") env['CM_MLPERF_LOADGEN_MODE'] = "accuracy" @@ -46,7 +46,7 @@ def preprocess(i): env['CM_NUM_THREADS'] = env.get('CM_HOST_CPU_TOTAL_CORES', '1') - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") if 'CM_MLPERF_CONF' not in env: env['CM_MLPERF_CONF'] = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "mlperf.conf") @@ -127,22 +127,22 @@ def preprocess(i): conf[metric] = value else: if metric in conf: - print("Original configuration value {} {}".format(conf[metric], metric)) + logging.info("Original configuration value {} {}".format(conf[metric], metric)) metric_value = str(float(conf[metric]) * tolerance) #some tolerance - print("Adjusted configuration value {} {}".format(metric_value, metric)) + logging.info("Adjusted configuration value {} {}".format(metric_value, metric)) else: #if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": if metric == "target_qps": if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": - print("In find performance mode: using 1 as target_qps") + logging.info("In find performance mode: using 1 as target_qps") else: - print("No target_qps specified. Using 1 as target_qps") + logging.info("No target_qps specified. Using 1 as target_qps") conf[metric] = 1 if metric == "target_latency": if env.get("CM_MLPERF_FIND_PERFORMANCE_MODE", '') == "yes": - print("In find performance mode: using 0.5ms as target_latency") + logging.info("In find performance mode: using 0.5ms as target_latency") else: - print("No target_latency specified. Using default") + logging.info("No target_latency specified. Using default") if env.get('CM_MLPERF_USE_MAX_DURATION', 'yes').lower() in [ "no", "false", "0" ] or env.get('CM_MLPERF_MODEL_EQUAL_ISSUE_MODE', 'no').lower() in [ "yes", "1", "true" ]: # Total number of queries needed is a multiple of dataset size. 
So we dont use max_duration and so we need to be careful with the input latency if '3d-unet' in env['CM_MODEL']: @@ -322,13 +322,13 @@ def preprocess(i): if not run_exists or rerun: - print("Output Dir: '" + OUTPUT_DIR + "'") - print(user_conf) + logging.info("Output Dir: '" + OUTPUT_DIR + "'") + logging.info(user_conf) if env.get('CM_MLPERF_POWER','') == "yes" and os.path.exists(env.get('CM_MLPERF_POWER_LOG_DIR', '')): shutil.rmtree(env['CM_MLPERF_POWER_LOG_DIR']) else: if not env.get('CM_MLPERF_COMPLIANCE_RUN_POSTPONED', False): - print("Run files exist, skipping run...\n") + logging.info("Run files exist, skipping run...\n") env['CM_MLPERF_SKIP_RUN'] = "yes" if not run_exists or rerun or not measure_files_exist(OUTPUT_DIR, \ @@ -342,7 +342,7 @@ def preprocess(i): else: env['CM_MLPERF_USER_CONF'] = os.path.join(os.path.dirname(user_conf_path), key+".conf")# user_conf_path else: - print(f"Measure files exist at {OUTPUT_DIR}. Skipping regeneration...\n") + logging.info(f"Measure files exist at {OUTPUT_DIR}. 
Skipping regeneration...\n") env['CM_MLPERF_USER_CONF'] = '' os.makedirs(OUTPUT_DIR, exist_ok=True) @@ -398,7 +398,7 @@ def run_files_exist(mode, OUTPUT_DIR, run_files, env): SCRIPT_PATH = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "compliance", "nvidia", test, "run_verification.py") cmd = env['CM_PYTHON_BIN'] + " " + SCRIPT_PATH + " -r " + RESULT_DIR + " -c " + COMPLIANCE_DIR + " -o "+ OUTPUT_DIR - print(cmd) + logging.info(cmd) os.system(cmd) is_valid = checker.check_compliance_perf_dir(COMPLIANCE_DIR) diff --git a/script/generate-mlperf-tiny-report/customize.py b/script/generate-mlperf-tiny-report/customize.py index 59b16019fb..d5bf5fa5e2 100644 --- a/script/generate-mlperf-tiny-report/customize.py +++ b/script/generate-mlperf-tiny-report/customize.py @@ -5,6 +5,7 @@ import subprocess import json import shutil +import logging def preprocess(i): @@ -48,8 +49,8 @@ def preprocess(i): env['CM_TINYMLPERF_CURRENT_DIR'] = cur_dir env['CM_TINYMLPERF_REPO_VERSION'] = version - print ('') - print ('Repo path: {}'.format(path)) + logging.info ('') + logging.info ('Repo path: {}'.format(path)) r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, diff --git a/script/generate-mlperf-tiny-submission/customize.py b/script/generate-mlperf-tiny-submission/customize.py index 026c6d623f..02e1650207 100644 --- a/script/generate-mlperf-tiny-submission/customize.py +++ b/script/generate-mlperf-tiny-submission/customize.py @@ -24,8 +24,8 @@ def generate_submission(i): if not os.path.isdir(submission_dir): os.makedirs(submission_dir) - print('* MLPerf tiny submission dir: {}'.format(submission_dir)) - print('* MLPerf tiny results dir: {}'.format(results_dir)) + logging.info('* MLPerf tiny submission dir: {}'.format(submission_dir)) + logging.info('* MLPerf tiny results dir: {}'.format(results_dir)) results = [f for f in os.listdir(results_dir) if not os.path.isfile(os.path.join(results_dir, f))] division=inp.get('division','open') @@ -35,7 +35,7 @@ def 
generate_submission(i): system_meta = state['CM_SUT_META'] division = system_meta['division'] - print('* MLPerf tiny division: {}'.format(division)) + logging.info('* MLPerf tiny division: {}'.format(division)) path_submission_root = submission_dir path_submission_division=os.path.join(path_submission_root, division) @@ -46,7 +46,7 @@ def generate_submission(i): submitter = system_meta['submitter'] env['CM_MLPERF_SUBMITTER'] = submitter - print('* MLPerf tiny submitter: {}'.format(submitter)) + logging.info('* MLPerf tiny submitter: {}'.format(submitter)) path_submission=os.path.join(path_submission_division, submitter) if not os.path.isdir(path_submission): @@ -62,8 +62,8 @@ def generate_submission(i): target = parts[1] framework = backend - print('* Target: {}'.format(target)) - print('* Framework: {}'.format(framework)) + logging.info('* Target: {}'.format(target)) + logging.info('* Framework: {}'.format(framework)) result_path = os.path.join(results_dir, res) platform_prefix = inp.get('platform_prefix', '') if platform_prefix: @@ -95,7 +95,7 @@ def generate_submission(i): if not os.path.exists(os.path.join(submission_code_path, "README.md")): with open(os.path.join(submission_code_path, "README.md"), mode='w'): pass #create an empty README - print('* MLPerf inference model: {}'.format(model)) + logging.info('* MLPerf inference model: {}'.format(model)) for scenario in scenarios: result_scenario_path = os.path.join(result_model_path, scenario) submission_scenario_path = os.path.join(submission_model_path, scenario) @@ -141,7 +141,7 @@ def generate_submission(i): files.append("accuracy.txt") for f in files: - print(' * ' + f) + logging.info(' * ' + f) p_target = os.path.join(submission_results_path, f) shutil.copy(os.path.join(result_mode_path, f), p_target) diff --git a/script/get-android-sdk/customize.py b/script/get-android-sdk/customize.py index 88248df9d9..69abc4fd58 100644 --- a/script/get-android-sdk/customize.py +++ b/script/get-android-sdk/customize.py 
@@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -41,7 +41,7 @@ def preprocess(i): sdk_manager_file = 'sdkmanager'+ext - print ('') + logging.info ('') found = False @@ -69,8 +69,8 @@ def preprocess(i): env['CM_ANDROID_CMDLINE_TOOLS_URL'] = package_url - print ('') - print ('Downloading from {} ...'.format(package_url)) + logging.info ('') + logging.info ('Downloading from {} ...'.format(package_url)) cm = automation.cmind @@ -81,7 +81,7 @@ def preprocess(i): filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + logging.info ('Unzipping file {}'.format(filename)) r = cm.access({'action':'unzip_file', 'automation':'utils,dc2743f8450541e3', @@ -109,7 +109,7 @@ def preprocess(i): paths.append(sdk_manager_dir) # Prepare SDK - print ('Preparing Android SDK manager ...') + logging.info ('Preparing Android SDK manager ...') r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'prepare-sdk-manager'}) if r['return']>0: return r diff --git a/script/get-aria2/customize.py b/script/get-aria2/customize.py index 3c65bbe4f6..3e6f2d8771 100644 --- a/script/get-aria2/customize.py +++ b/script/get-aria2/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): # Pre-set by CM @@ -59,7 +59,7 @@ def preprocess(i): url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format(version, archive_with_ext) env['CM_ARIA2_DOWNLOAD_URL'] = url - print ('URL to download ARIA2: {}'.format(url)) + logging.info ('URL to download ARIA2: {}'.format(url)) r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) if r['return']>0: return r @@ -99,7 +99,7 @@ def detect_version(i): if r['return'] >0: return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: 
{}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-aws-cli/customize.py b/script/get-aws-cli/customize.py index af7fd1603b..d1f8423cb0 100644 --- a/script/get-aws-cli/customize.py +++ b/script/get-aws-cli/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -38,7 +38,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-bazel/customize.py b/script/get-bazel/customize.py index c4622a7f4a..ee0dee77f2 100644 --- a/script/get-bazel/customize.py +++ b/script/get-bazel/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -38,7 +38,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-cl/customize.py b/script/get-cl/customize.py index 1d205d8fdf..3de783824f 100644 --- a/script/get-cl/customize.py +++ b/script/get-cl/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -34,7 +34,7 @@ def preprocess(i): if env.get('CM_INPUT','').strip()=='' and env.get('CM_TMP_PATH','').strip()=='': - print (i['recursion_spaces'] + ' Starting deep search for {} - it may take some time ...'.format(file_name)) + logging.info (i['recursion_spaces'] + ' Starting deep search for {} - it may take some time ...'.format(file_name)) paths = ['C:\\Program Files\\Microsoft Visual Studio', 'C:\\Program Files (x86)\\Microsoft Visual Studio', @@ -112,7 +112,7 @@ def detect_version(i): version = 
r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-cmake/customize.py b/script/get-cmake/customize.py index ebfd0c319a..a411e29f3e 100644 --- a/script/get-cmake/customize.py +++ b/script/get-cmake/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -37,7 +37,7 @@ def detect_version(i): if r['return'] >0: return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-conda/customize.py b/script/get-conda/customize.py index d8ef13e343..5febd431f8 100644 --- a/script/get-conda/customize.py +++ b/script/get-conda/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -43,7 +43,7 @@ def preprocess(i): if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': return r - print (recursion_spaces+' # {}'.format(r['error'])) + logging.info (recursion_spaces+' # {}'.format(r['error'])) # Attempt to run installer r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'}) @@ -90,6 +90,6 @@ def postprocess(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-cuda-devices/customize.py b/script/get-cuda-devices/customize.py index 54fa9094f2..dd4157610c 100644 --- a/script/get-cuda-devices/customize.py +++ b/script/get-cuda-devices/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import subprocess - +import logging def 
postprocess(i): env = i['env'] @@ -20,7 +20,7 @@ def postprocess(i): p = {} for line in lst: - print (line) + logging.info (line) j = line.find(':') if j>=0: diff --git a/script/get-cuda/customize.py b/script/get-cuda/customize.py index 95984515bc..c16289bbf4 100644 --- a/script/get-cuda/customize.py +++ b/script/get-cuda/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import json - +import logging def preprocess(i): os_info = i['os_info'] @@ -90,14 +90,14 @@ def detect_version_nvcc(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def detect_version_cuda_lib(i): env = i['env'] - print(env) + logging.info(env) cuda_rt_file_path = env['CM_CUDA_RT_WITH_PATH'] cuda_lib_path=os.path.dirname(cuda_rt_file_path) cuda_path = os.path.abspath(os.path.join(cuda_lib_path, os.pardir)) @@ -116,7 +116,7 @@ def detect_version_cuda_lib(i): env['CM_CUDA_VERSION'] = cuda_version version = cuda_version - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} @@ -151,7 +151,7 @@ def postprocess(i): parent_path = os.path.dirname(parent_path) while os.path.isdir(parent_path): if os.path.exists(os.path.join(parent_path, "include")): - print("Path is "+parent_path) + logging.info("Path is "+parent_path) found_path = parent_path cuda_path = found_path env['CM_CUDA_INSTALLED_PATH'] = cuda_path diff --git a/script/get-cudnn/customize.py b/script/get-cudnn/customize.py index db43d93d31..bef4c16bd6 100644 --- a/script/get-cudnn/customize.py +++ b/script/get-cudnn/customize.py @@ -2,7 +2,7 @@ import os import tarfile import shutil - +import logging def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -100,7 +100,7 @@ def preprocess(i): if 
env.get('CM_CUDNN_TAR_FILE_PATH','')=='': return {'return': 1, 'error': 'Please envoke cm run script "get cudnn" --tar_file={full path to the cuDNN tar file}'} - print ('Untaring file - can take some time ...') + logging.info ('Untaring file - can take some time ...') my_tar = tarfile.open(os.path.expanduser(env['CM_CUDNN_TAR_FILE_PATH'])) folder_name = my_tar.getnames()[0] @@ -123,9 +123,9 @@ def preprocess(i): env['CM_CUDA_PATH_INCLUDE_CUDNN'] = env['CM_CUDA_PATH_INCLUDE'] try: - print("Copying cudnn include files to {}(CUDA_INCLUDE_PATH)".format(cuda_inc_path)) + logging.info("Copying cudnn include files to {}(CUDA_INCLUDE_PATH)".format(cuda_inc_path)) shutil.copytree(inc_path, cuda_inc_path, dirs_exist_ok = True) - print("Copying cudnn lib files to {}CUDA_LIB_PATH".format(cuda_lib_path)) + logging.info("Copying cudnn lib files to {}CUDA_LIB_PATH".format(cuda_lib_path)) shutil.copytree(lib_path, cuda_lib_path, dirs_exist_ok = True) except: #Need to copy to system path via run.sh diff --git a/script/get-dataset-cnndm/customize.py b/script/get-dataset-cnndm/customize.py index 27363d8000..95e02e3d97 100644 --- a/script/get-dataset-cnndm/customize.py +++ b/script/get-dataset-cnndm/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): env = i['env'] @@ -9,7 +9,7 @@ def preprocess(i): if env.get('CM_CNNDM_INTEL_VARIATION', '') == 'yes': i['run_script_input']['script_name'] = "run-intel" else: - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") return {'return': 0} diff --git a/script/get-dataset-coco/customize.py b/script/get-dataset-coco/customize.py index 78ced4d7bd..63d900f2e6 100644 --- a/script/get-dataset-coco/customize.py +++ b/script/get-dataset-coco/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): # 
CM script internal variables @@ -34,8 +34,8 @@ def preprocess(i): if not detected: return {'return':1, 'error':'COCO dataset is not detected in "{}"'.format(path)} - print ('') - print ('Detected COCO dataset {} {}'.format(tp,ver)) + logging.info ('') + logging.info ('Detected COCO dataset {} {}'.format(tp,ver)) env['CM_DATASET_COCO_DETECTED'] = 'yes' env['CM_DATASET_COCO_PATH'] = path @@ -134,9 +134,9 @@ def preprocess(i): env['CM_DATASET_COCO_MD5SUM_ANN'] = md5sum_ann if not detected: - print ('') - print ('URL for data: {}'.format(url_data_full)) - print ('URL for annotations: {}'.format(url_ann_full)) + logging.info ('') + logging.info ('URL for data: {}'.format(url_data_full)) + logging.info ('URL for annotations: {}'.format(url_ann_full)) # Add version and type to tags extra_cache_tags = [] @@ -170,9 +170,9 @@ def postprocess(i): path_data = env['CM_DATASET_COCO_DATA_PATH'] path_ann = env['CM_DATASET_COCO_ANNOTATIONS_PATH'] - print ('') - print (path_all) - print ('') + logging.info ('') + logging.info (path_all) + logging.info ('') path_data_full = os.path.join(path_data, tp_ver) path_ann_full = os.path.join(path_ann, 'annotations') @@ -192,7 +192,7 @@ def postprocess(i): command2 = ' ln -s ' + path_ann_full + ' annotations' for command in [command1, command2]: - print (command) + logging.info (command) os.system(command) diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py index c48f71616d..068ffd3da1 100644 --- a/script/get-dataset-coco2014/customize.py +++ b/script/get-dataset-coco2014/customize.py @@ -1,12 +1,12 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): env = i['env'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") run_dir = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") diff --git 
a/script/get-dataset-openimages-calibration/filter.py b/script/get-dataset-openimages-calibration/filter.py index 81b768249c..0ce665a627 100644 --- a/script/get-dataset-openimages-calibration/filter.py +++ b/script/get-dataset-openimages-calibration/filter.py @@ -1,7 +1,7 @@ import json import sys import os - +import logging with open(sys.argv[1], "r") as f: data = json.load(f) @@ -17,4 +17,4 @@ sorted_image_data = sorted(data['images'], key=lambda x: x['num_boxes'], reverse= os.environ.get('CM_CALIBRATION_FILTER_ORDER_BY_NUM_BOXES_ASC', '') == "yes") for image in data['images']: - print(image['file_name']) + logging.info(image['file_name']) diff --git a/script/get-dataset-openimages/customize.py b/script/get-dataset-openimages/customize.py index 3040fff2ba..f398b032a3 100644 --- a/script/get-dataset-openimages/customize.py +++ b/script/get-dataset-openimages/customize.py @@ -1,15 +1,15 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): os_info = i['os_info'] env = i['env'] - print ("") - print ("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") - print ("") + logging.info ("") + logging.info ("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info ("") if os_info['platform'] == 'windows': MLPERF_CLASSES=['Airplane','Antelope','Apple','Backpack','Balloon','Banana', diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py index c4a99f1a23..ddea2049e9 100644 --- a/script/get-docker/customize.py +++ b/script/get-docker/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -42,7 +42,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git 
a/script/get-gcc/customize.py b/script/get-gcc/customize.py index b29f38e13b..00b7a41af6 100644 --- a/script/get-gcc/customize.py +++ b/script/get-gcc/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -51,7 +51,7 @@ def detect_version(i): return r version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-generic-python-lib/customize.py b/script/get-generic-python-lib/customize.py index 6dcf770f3d..62f304a6b2 100644 --- a/script/get-generic-python-lib/customize.py +++ b/script/get-generic-python-lib/customize.py @@ -1,6 +1,7 @@ from cmind import utils import os import cmind as cm +import logging def preprocess(i): @@ -86,9 +87,9 @@ def preprocess(i): if env.get('CM_GENERIC_PYTHON_PIP_UPDATE','') in [True,'true','yes','on']: extra +=' -U' - print ('') - print (recursion_spaces + ' Extra PIP CMD: ' + extra) - print ('') + logging.info ('') + logging.info (recursion_spaces + ' Extra PIP CMD: ' + extra) + logging.info ('') env['CM_GENERIC_PYTHON_PIP_EXTRA'] = extra @@ -118,7 +119,7 @@ def detect_version(i): current_detected_version = version if env.get('CM_TMP_SILENT','')!='yes': - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-generic-sys-util/customize.py b/script/get-generic-sys-util/customize.py index 6ec4bfe05a..cfd0481c86 100644 --- a/script/get-generic-sys-util/customize.py +++ b/script/get-generic-sys-util/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -10,9 +10,9 @@ def preprocess(i): pm = env.get('CM_HOST_OS_PACKAGE_MANAGER') if os_info['platform'] == 'windows': 
- print ('') - print ('WARNING: for now skipping get-generic-sys-util on Windows ...') - print ('') + logging.info ('') + logging.warning ('WARNING: for now skipping get-generic-sys-util on Windows ...') + logging.info ('') return {'return':0} diff --git a/script/get-github-cli/customize.py b/script/get-github-cli/customize.py index 8c64641189..6402cbed1a 100644 --- a/script/get-github-cli/customize.py +++ b/script/get-github-cli/customize.py @@ -1,5 +1,6 @@ from cmind import utils import os +import logging def preprocess(i): @@ -25,7 +26,7 @@ def preprocess(i): if env.get('CM_TMP_FAIL_IF_NOT_FOUND','').lower() == 'yes': return r - print (recursion_spaces+' # {}'.format(r['error'])) + logging.info (recursion_spaces+' # {}'.format(r['error'])) # Attempt to run installer r = {'return':0, 'skip':True, 'script':{'tags':'install,github-cli'}} @@ -48,7 +49,7 @@ def postprocess(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-go/customize.py b/script/get-go/customize.py index d65126585b..89ac219294 100644 --- a/script/get-go/customize.py +++ b/script/get-go/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -38,7 +38,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-ipol-src/customize.py b/script/get-ipol-src/customize.py index f2b5dd1fca..9a17d968d2 100644 --- a/script/get-ipol-src/customize.py +++ b/script/get-ipol-src/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -22,7 +22,7 @@ def
preprocess(i): url = url.replace('{{CM_IPOL_YEAR}}', year).replace('{{CM_IPOL_NUMBER}}', number) - print ('Downloading from {}'.format(url)) + logging.info ('Downloading from {}'.format(url)) r = cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', @@ -31,7 +31,7 @@ def preprocess(i): filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + logging.info ('Unzipping file {}'.format(filename)) r = cm.access({'action':'unzip_file', 'automation':'utils,dc2743f8450541e3', @@ -39,7 +39,7 @@ def preprocess(i): if r['return']>0: return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + logging.info ('Removing file {}'.format(filename)) os.remove(filename) # Get sub-directory from filename @@ -52,7 +52,7 @@ def preprocess(i): # Applying patch cmd = 'patch -p0 < {}'.format(os.path.join(script_path, 'patch', '20240127.patch')) - print ('Patching code: {}'.format(cmd)) + logging.info ('Patching code: {}'.format(cmd)) os.system(cmd) return {'return':0} diff --git a/script/get-java/customize.py b/script/get-java/customize.py index 8cfc211bcf..e1ce8ef339 100644 --- a/script/get-java/customize.py +++ b/script/get-java/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -66,8 +66,8 @@ def preprocess(i): env['CM_JAVA_PREBUILT_URL'] = url env['CM_JAVA_PREBUILT_FILENAME'] = filename - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) + logging.info ('') + logging.info (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) if rr['return']>0: return rr @@ -78,8 +78,8 @@ def preprocess(i): if not os.path.isfile(target_file): return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} - print ('') - print 
(recursion_spaces + ' Registering file {} ...'.format(target_file)) + logging.info ('') + logging.info (recursion_spaces + ' Registering file {} ...'.format(target_file)) env[env_path_key] = target_file @@ -111,7 +111,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-javac/customize.py b/script/get-javac/customize.py index f7e076bd93..bf6dc9cd42 100644 --- a/script/get-javac/customize.py +++ b/script/get-javac/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -66,8 +66,8 @@ def preprocess(i): env['CM_JAVAC_PREBUILT_URL'] = url env['CM_JAVAC_PREBUILT_FILENAME'] = filename - print ('') - print (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) + logging.info ('') + logging.info (recursion_spaces + ' Downloading and installing prebuilt Java from {} ...'.format(url+filename)) rr = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install-prebuilt'}) @@ -79,8 +79,8 @@ def preprocess(i): if not os.path.isfile(target_file): return {'return':1, 'error':'can\'t find target file {}'.format(target_file)} - print ('') - print (recursion_spaces + ' Registering file {} ...'.format(target_file)) + logging.info ('') + logging.info (recursion_spaces + ' Registering file {} ...'.format(target_file)) env[env_path_key] = target_file @@ -112,7 +112,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-llvm/customize.py b/script/get-llvm/customize.py index c9d872a23f..6e019df6fd 100644 --- 
a/script/get-llvm/customize.py +++ b/script/get-llvm/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -41,7 +41,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-ml-model-3d-unet-kits19/customize.py b/script/get-ml-model-3d-unet-kits19/customize.py index 65961f1565..1c8539c4c8 100644 --- a/script/get-ml-model-3d-unet-kits19/customize.py +++ b/script/get-ml-model-3d-unet-kits19/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -15,7 +15,7 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] - print ('Downloading from {}'.format(url)) + logging.info ('Downloading from {}'.format(url)) r = cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', diff --git a/script/get-ml-model-efficientnet-lite/customize.py b/script/get-ml-model-efficientnet-lite/customize.py index 5571383453..d80b28e67c 100644 --- a/script/get-ml-model-efficientnet-lite/customize.py +++ b/script/get-ml-model-efficientnet-lite/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -16,7 +16,7 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url - print ('Downloading from {}'.format(url)) + logging.info ('Downloading from {}'.format(url)) r = cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', diff --git a/script/get-ml-model-gptj/convert_gptj_ckpt.py b/script/get-ml-model-gptj/convert_gptj_ckpt.py index 34f404932a..3dd03e0006 100644 --- a/script/get-ml-model-gptj/convert_gptj_ckpt.py +++ b/script/get-ml-model-gptj/convert_gptj_ckpt.py @@ -23,6 +23,7 @@ from paxml import 
train_states from praxis import py_utils from transformers import AutoModelForCausalLM +import logging # 6B example num_layers = 28 @@ -34,13 +35,13 @@ def convert(base_model_path, pax_model_path): """Convert from gpt-j-6b to pax.""" - print(f'Loading the base model from {base_model_path}') + logging.info(f'Loading the base model from {base_model_path}') base = AutoModelForCausalLM.from_pretrained( base_model_path, low_cpu_mem_usage=True ) for key, value in base.state_dict().items(): - print('%s %s' % (key, value.data.numpy().shape)) + logging.info('%s %s' % (key, value.data.numpy().shape)) jax_weights = { 'lm': { @@ -147,7 +148,7 @@ def convert(base_model_path, pax_model_path): } jax_weights['lm']['transformer']['x_layers_%d' % layer_idx] = layer_weight - print(f'Saving the pax model to {pax_model_path}') + logging.info(f'Saving the pax model to {pax_model_path}') jax_states = train_states.TrainState( step=0, mdl_vars={'params': jax_weights}, opt_states={} ) @@ -167,7 +168,7 @@ def identity(x): pax_model_path, checkpoint_type=checkpoints.CheckpointType.GDA, ) - print('done') + logging.info('done') if __name__ == '__main__': diff --git a/script/get-ml-model-gptj/customize.py b/script/get-ml-model-gptj/customize.py index 4c52200930..d094b124ec 100644 --- a/script/get-ml-model-gptj/customize.py +++ b/script/get-ml-model-gptj/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -9,7 +9,7 @@ def preprocess(i): if env.get('CM_GPTJ_INTEL_MODEL', '') == 'yes': i['run_script_input']['script_name'] = 'run-intel' harness_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', 'gptj-99', 'pytorch-cpu') - print(f"Harness Root: {harness_root}") + logging.info(f"Harness Root: {harness_root}") env['CM_HARNESS_CODE_ROOT'] = harness_root env['CM_CALIBRATION_CODE_ROOT'] = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'calibration') diff --git 
a/script/get-ml-model-huggingface-zoo/download_model.py b/script/get-ml-model-huggingface-zoo/download_model.py index 4e6e9c86e8..28d9725b5e 100644 --- a/script/get-ml-model-huggingface-zoo/download_model.py +++ b/script/get-ml-model-huggingface-zoo/download_model.py @@ -1,5 +1,6 @@ from huggingface_hub import hf_hub_download import os +import logging model_stub = os.environ.get('CM_MODEL_ZOO_STUB', '') model_task = os.environ.get('CM_MODEL_TASK', '') @@ -7,7 +8,7 @@ revision = os.environ.get('CM_HF_REVISION','') if model_task == "prune": - print("Downloading model: " + model_stub) + logging.info("Downloading model: " + model_stub) for filename in ["pytorch_model.bin", "config.json"]: @@ -40,8 +41,8 @@ # List all files in a directory path = model_stub+'/'+full_subfolder - print ('') - print ('Listing files in {} ...'.format(path)) + logging.info ('') + logging.info ('Listing files in {} ...'.format(path)) def list_hf_files(path): all_files = [] @@ -63,8 +64,8 @@ def list_hf_files(path): files=list_hf_files(path) - print ('') - print ('Found {} files'.format(len(files))) + logging.info ('') + logging.info ('Found {} files'.format(len(files))) for f in files: @@ -79,10 +80,10 @@ def list_hf_files(path): model_filenames.append(ff) - print ('') + logging.info ('') for model_filename in model_filenames: - print("Downloading file {} / {} ...".format(model_stub, model_filename)) + logging.info("Downloading file {} / {} ...".format(model_stub, model_filename)) extra_dir = os.path.dirname(model_filename) @@ -101,7 +102,7 @@ def list_hf_files(path): cache_dir=os.getcwd()) - print ('') + logging.info ('') with open('tmp-run-env.out', 'w') as f: f.write(f"CM_ML_MODEL_FILE_WITH_PATH={os.path.join(os.getcwd(),base_model_filename)}") diff --git a/script/get-ml-model-mobilenet/customize.py b/script/get-ml-model-mobilenet/customize.py index 5571383453..d80b28e67c 100644 --- a/script/get-ml-model-mobilenet/customize.py +++ b/script/get-ml-model-mobilenet/customize.py @@ -1,6 +1,6 @@ 
from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -16,7 +16,7 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] env['CM_ML_MODEL_STARTING_WEIGHTS_FILENAME'] = url - print ('Downloading from {}'.format(url)) + logging.info ('Downloading from {}'.format(url)) r = cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', diff --git a/script/get-ml-model-neuralmagic-zoo/download_sparse.py b/script/get-ml-model-neuralmagic-zoo/download_sparse.py index 1da36774bd..d2278b535b 100644 --- a/script/get-ml-model-neuralmagic-zoo/download_sparse.py +++ b/script/get-ml-model-neuralmagic-zoo/download_sparse.py @@ -1,8 +1,8 @@ from sparsezoo import Model import os - +import logging model_stub= os.environ.get('CM_MODEL_ZOO_STUB', '') -print(f"Downloading model {model_stub}") +logging.info(f"Downloading model {model_stub}") stub = f"{model_stub}" model = Model(stub) diff --git a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py index e076e4072e..c504a80acc 100644 --- a/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py +++ b/script/get-ml-model-retinanet-nvidia/nvidia_patch_retinanet_efficientnms.py @@ -17,7 +17,7 @@ import argparse import json import re - +import logging import onnx_graphsurgeon as gs import numpy as np import os @@ -56,9 +56,9 @@ attrs.update(node_attrs) anchors = np.load(anchor_xywh_1x1_npy) -print(f"anchors shape: {anchors.shape}, top 4: {anchors[0, :]}") +logging.info(f"anchors shape: {anchors.shape}, top 4: {anchors[0, :]}") anchors = np.expand_dims(anchors, axis=0) -print(f"anchors shape: {anchors.shape}") +logging.info(f"anchors shape: {anchors.shape}") anchor_tensor = gs.Constant(name="anchor", values=anchors) diff --git a/script/get-ml-model-retinanet/node-precision-info.py b/script/get-ml-model-retinanet/node-precision-info.py index 100a64ecbb..888b05a7ee 100644 
--- a/script/get-ml-model-retinanet/node-precision-info.py +++ b/script/get-ml-model-retinanet/node-precision-info.py @@ -3,6 +3,7 @@ import sys import argparse import yaml +import logging def parse_args(add_help=True): parser = argparse.ArgumentParser(description='Print node precision info for the onnx file', add_help=add_help) @@ -45,7 +46,7 @@ def main(args): elif set(list2) < set(node_names): valid_list = list2 else: - print("Node names are not matching with the expected ones in the input onnx file.") + logging.info("Node names are not matching with the expected ones in the input onnx file.") sys.exit(1) node_precision_info = {} @@ -61,7 +62,7 @@ def main(args): with open(args.output, "w") as f: f.write(yaml_output) - print(f"Node precision info successfully printed out to {args.output}") + logging.info(f"Node precision info successfully printed out to {args.output}") if __name__ == "__main__": diff --git a/script/get-ml-model-rnnt/customize.py b/script/get-ml-model-rnnt/customize.py index 65961f1565..1c8539c4c8 100644 --- a/script/get-ml-model-rnnt/customize.py +++ b/script/get-ml-model-rnnt/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -15,7 +15,7 @@ def preprocess(i): url = env['CM_PACKAGE_URL'] - print ('Downloading from {}'.format(url)) + logging.info ('Downloading from {}'.format(url)) r = cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', diff --git a/script/get-mlperf-inference-sut-configs/customize.py b/script/get-mlperf-inference-sut-configs/customize.py index 8bca2a4012..f9c85fcc0d 100644 --- a/script/get-mlperf-inference-sut-configs/customize.py +++ b/script/get-mlperf-inference-sut-configs/customize.py @@ -2,7 +2,7 @@ import os import yaml import shutil - +import logging def postprocess(i): env = i['env'] state = i['state'] @@ -50,7 +50,7 @@ def postprocess(i): if os.path.exists(config_path_default): shutil.copy(config_path_default, config_path) 
else: - print(f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}, backend: '{backend}', copying from default") + logging.info(f"Config file missing for given hw_name: '{env['CM_HW_NAME']}', implementation: '{implementation_string}', device: '{device}, backend: '{backend}', copying from default") src_config = os.path.join(env['CM_TMP_CURRENT_SCRIPT_PATH'], "configs", "default", "config.yaml") shutil.copy(src_config, config_path) os.makedirs(os.path.dirname(config_path_default), exist_ok=True) diff --git a/script/get-mlperf-inference-sut-description/customize.py b/script/get-mlperf-inference-sut-description/customize.py index 71636941f7..392041a4c9 100644 --- a/script/get-mlperf-inference-sut-description/customize.py +++ b/script/get-mlperf-inference-sut-description/customize.py @@ -2,6 +2,7 @@ import os import json import shutil +import logging def preprocess(i): env = i['env'] @@ -37,19 +38,19 @@ def preprocess(i): sut_path = os.path.join(sut_desc_path, "suts", sut + ".json") if os.path.exists(sut_path) and env.get('CM_SUT_DESC_CACHE', '') == "yes": - print(f"Reusing SUT description file {sut}") + logging.info(f"Reusing SUT description file {sut}") state['CM_SUT_META'] = json.load(open(sut_path)) else: if not os.path.exists(os.path.dirname(sut_path)): os.makedirs(os.path.dirname(sut_path)) - print("Generating SUT description file for " + sut) + logging.info("Generating SUT description file for " + sut) hw_path = os.path.join(os.getcwd(), "hardware", hw_name + ".json") if not os.path.exists(os.path.dirname(hw_path)): os.makedirs(os.path.dirname(hw_path)) if not os.path.exists(hw_path): default_hw_path = os.path.join(script_path, "hardware", "default.json") - print("HW description file for " + hw_name + " not found. Copying from default!!!") + logging.info("HW description file for " + hw_name + " not found. 
Copying from default!!!") shutil.copy(default_hw_path, hw_path) state['CM_HW_META'] = json.load(open(hw_path)) diff --git a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py index 93a162b980..884c378062 100644 --- a/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py +++ b/script/get-mlperf-tiny-eembc-energy-runner-src/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): os_info = i['os_info'] @@ -32,8 +32,8 @@ def postprocess(i): sessions_path = os.path.join(home_directory, 'eembc', 'runner', 'sessions') - print ('') - print ('Path to EEMBC runner sessions: {}'.format(sessions_path)) + logging.info ('') + logging.info ('Path to EEMBC runner sessions: {}'.format(sessions_path)) env['CM_EEMBC_ENERGY_RUNNER_SESSIONS'] = sessions_path @@ -42,16 +42,16 @@ def postprocess(i): datasets_path = os.path.join(home_directory, 'eembc', 'runner', 'benchmarks', 'ulp-mlperf', 'datasets') - print ('') - print ('Path to EEMBC runner datasets: {}'.format(datasets_path)) + logging.info ('') + logging.info ('Path to EEMBC runner datasets: {}'.format(datasets_path)) if not os.path.isdir(datasets_path): os.makedirs(datasets_path) env['CM_EEMBC_ENERGY_RUNNER_DATASETS'] = datasets_path - print ('') - print ('Copying datasets to EEMBC user space ...') + logging.info ('') + logging.info ('Copying datasets to EEMBC user space ...') shutil.copytree(datasets_src_path, datasets_path, dirs_exist_ok=True) diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py index 786bc8122c..14996f0869 100644 --- a/script/get-onnxruntime-prebuilt/customize.py +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] env = i['env'] @@ -29,9 +29,9 @@ def preprocess(i): URL = 
'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format(version, FILENAME) - print ('') - print ('Downloading from {}'.format(URL)) - print ('') + logging.info ('') + logging.info ('Downloading from {}'.format(URL)) + logging.info ('') env['FOLDER'] = FOLDER env['FILENAME'] = FILENAME diff --git a/script/get-openssl/customize.py b/script/get-openssl/customize.py index 9d126fd79e..f7932c84ec 100644 --- a/script/get-openssl/customize.py +++ b/script/get-openssl/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -36,7 +36,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/get-preprocessed-dataset-criteo/customize.py b/script/get-preprocessed-dataset-criteo/customize.py index d6826e38cb..58371f97b6 100644 --- a/script/get-preprocessed-dataset-criteo/customize.py +++ b/script/get-preprocessed-dataset-criteo/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): env = i['env'] @@ -12,7 +12,7 @@ def preprocess(i): Path with preprocessed dataset given as input ''' skip_preprocessing = True - print("Using preprocessed criteo dataset from '" + env['CM_DATASET_PREPROCESSED_PATH'] +"'") + logging.info("Using preprocessed criteo dataset from '" + env['CM_DATASET_PREPROCESSED_PATH'] +"'") if not skip_preprocessing and env.get('CM_DATASET_PREPROCESSED_OUTPUT_PATH','') != '': env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() @@ -26,6 +26,6 @@ def preprocess(i): run_dir = os.path.join(env['CM_MLPERF_TRAINING_SOURCE'], "recommendation_v2", "torchrec_dlrm", "scripts") env['CM_RUN_CMD'] = f'cd {run_dir} && bash ./process_Criteo_1TB_Click_Logs_dataset.sh {dataset_path} {tmp_dir} {output_dir} ' - print("Using MLCommons 
Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") + logging.info("Using MLCommons Training source from '" + env['CM_MLPERF_TRAINING_SOURCE'] +"'") return {'return': 0} diff --git a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py index 752895db88..4359f681ff 100644 --- a/script/get-preprocessed-dataset-generic/src/generic_preprocess.py +++ b/script/get-preprocessed-dataset-generic/src/generic_preprocess.py @@ -5,7 +5,7 @@ import os import cv2 import numpy as np - +import logging # Load and preprocess image def load_image(image_path, # Full path to processing image target_size, # Desired size of resulting image @@ -114,7 +114,7 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, crop_perce full_output_path = os.path.join(destination_dir, output_filename) image_data.tofile(full_output_path) - print("[{}]: Stored {}".format(current_idx+1, full_output_path) ) + logging.info("[{}]: Stored {}".format(current_idx+1, full_output_path) ) output_filenames.append(output_filename) @@ -171,7 +171,7 @@ def preprocess(): interpolation_method = os.getenv('CM_DATASET_INTERPOLATION_METHOD', '') - print(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {},"+ + logging.info(("From: {}, To: {}, Size: {}, Crop: {}, InterSize: {}, 2BGR: {}, OFF: {}, VOL: '{}', FOF: {},"+ " DTYPE: {}, DLAYOUT: {}, EXT: {}, NORM: {}, SMEAN: {}, GCM: {}, GSTD: {}, QUANTIZE: {}, QUANT_SCALE: {}, QUANT_OFFSET: {}, CONV_UNSIGNED: {}, INTER: {}").format( source_dir, destination_dir, square_side, crop_percentage, inter_size, convert_to_bgr, offset, volume, fof_name, data_type, data_layout, new_file_extension, normalize_data, subtract_mean, given_channel_means, given_channel_stds, quantize, quant_scale, quant_offset, convert_to_unsigned, interpolation_method) ) diff --git a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py 
b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py index 84e18ee397..3abd01cf99 100644 --- a/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py +++ b/script/get-preprocessed-dataset-generic/src/preprocess_object_detection_dataset.py @@ -6,6 +6,7 @@ from PIL import Image import torch import torchvision +import logging SUPPORTED_EXTENSIONS = ['jpeg', 'jpg', 'gif', 'png'] @@ -72,7 +73,7 @@ def preprocess_files(selected_filenames, source_dir, destination_dir, square_sid full_output_path = os.path.join(destination_dir, output_filename) image_data.tofile(full_output_path) - print(f"[{current_idx+1}]: Stored {full_output_path}") + logging.info(f"[{current_idx+1}]: Stored {full_output_path}") output_signatures.append(f'{output_filename};{original_width};{original_height}') return output_signatures @@ -123,7 +124,7 @@ def preprocess(): if convert_to_bgr: given_channel_stds = given_channel_stds[::-1] - print(f"From: {source_directory}, To: {destination_directory}, Size: {square_side}, Crop: {crop_percentage}, InterSize: {inter_size}, 2BGR: {convert_to_bgr}, " + + logging.info(f"From: {source_directory}, To: {destination_directory}, Size: {square_side}, Crop: {crop_percentage}, InterSize: {inter_size}, 2BGR: {convert_to_bgr}, " + f"OFF: {offset}, VOL: '{volume}', FOF: {fof_name}, DTYPE: {data_type}, DLAYOUT: {data_layout}, EXT: {new_file_extension}, " + f"NORM: {normalize_data}, SMEAN: {subtract_mean}, GCM: {given_channel_means}, GSTD: {given_channel_stds}, QUANTIZE: {quantize}, QUANT_SCALE: {quant_scale}, " + f"QUANT_OFFSET: {quant_offset}, CONV_UNSIGNED: {convert_to_unsigned}, INTER: {interpolation_method}") diff --git a/script/get-preprocessed-dataset-imagenet/customize.py b/script/get-preprocessed-dataset-imagenet/customize.py index f744e1330f..df0c9e5794 100644 --- a/script/get-preprocessed-dataset-imagenet/customize.py +++ b/script/get-preprocessed-dataset-imagenet/customize.py @@ -3,7 +3,7 @@ from 
os.path import exists import shutil import glob - +import logging def preprocess(i): env = i['env'] @@ -15,7 +15,7 @@ def preprocess(i): return {'return': 1, 'error': 'No preprocessed images found in '+env['CM_IMAGENET_PREPROCESSED_PATH']} else: if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() if env['CM_DATASET_TYPE'] == "validation" and not exists(os.path.join(env['CM_DATASET_PATH'], "val_map.txt")): diff --git a/script/get-preprocessed-dataset-kits19/customize.py b/script/get-preprocessed-dataset-kits19/customize.py index 8de0593753..5df7de6132 100644 --- a/script/get-preprocessed-dataset-kits19/customize.py +++ b/script/get-preprocessed-dataset-kits19/customize.py @@ -1,12 +1,12 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): env = i['env'] - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") preprocess_src = os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], 'preprocess.py') cmd = 'cd '+ env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} preprocess.py --raw_data_dir ' + env['CM_DATASET_PATH'] + ' --results_dir ' + os.getcwd() + ' --mode preprocess' env['CM_TMP_CMD'] = cmd diff --git a/script/get-preprocessed-dataset-librispeech/customize.py b/script/get-preprocessed-dataset-librispeech/customize.py index e5a8a12e2b..18978da48c 100644 --- a/script/get-preprocessed-dataset-librispeech/customize.py +++ b/script/get-preprocessed-dataset-librispeech/customize.py @@ -1,12 +1,12 @@ from cmind import utils import os import shutil - +import logging def preprocess(i): env = i['env'] - print("Using MLCommons Inference source 
from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") preprocess_src = os.path.join(env['CM_MLPERF_INFERENCE_RNNT_PATH'], 'pytorch', 'utils', 'convert_librispeech.py') cmd = 'cd '+ env['CM_MLPERF_INFERENCE_3DUNET_PATH'] + ' && ${CM_PYTHON_BIN_WITH_PATH} ' + preprocess_src + ' --input_dir ' + env['CM_DATASET_LIBRISPEECH_PATH'] + ' --dest_dir ' + os.path.join(os.getcwd(), 'dev-clean-wav') + ' --output_json ' + os.path.join(os.getcwd(), 'dev-clean-wav.json') env['CM_TMP_CMD'] = cmd diff --git a/script/get-preprocessed-dataset-openimages/customize.py b/script/get-preprocessed-dataset-openimages/customize.py index fd2adcb5f6..9cfd4cf8b6 100644 --- a/script/get-preprocessed-dataset-openimages/customize.py +++ b/script/get-preprocessed-dataset-openimages/customize.py @@ -2,7 +2,7 @@ import os import shutil import glob - +import logging def preprocess(i): env = i['env'] @@ -11,7 +11,7 @@ def preprocess(i): env['CM_DATASET_PREPROCESSED_PATH'] = os.getcwd() if env.get('CM_DATASET_REFERENCE_PREPROCESSOR',"0") == "1": - print("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") + logging.info("Using MLCommons Inference source from '" + env['CM_MLPERF_INFERENCE_SOURCE'] +"'") if env.get('CM_ML_MODEL_NAME', '') == 'retinanet': if env.get('CM_DATASET_QUANTIZE', '') == '1': diff --git a/script/get-python3/customize.py b/script/get-python3/customize.py index 5d07f6ac86..c5c58e2f6c 100644 --- a/script/get-python3/customize.py +++ b/script/get-python3/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -56,7 +56,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git 
a/script/get-qaic-apps-sdk/customize.py b/script/get-qaic-apps-sdk/customize.py index b84d58b178..d83ddb57b9 100644 --- a/script/get-qaic-apps-sdk/customize.py +++ b/script/get-qaic-apps-sdk/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import xml.etree.ElementTree as et - +import logging def preprocess(i): os_info = i['os_info'] @@ -59,7 +59,7 @@ def detect_version(i): if not version: return {'return':1, 'error': f'qaic apps sdk version info not found'} - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-qaic-platform-sdk/customize.py b/script/get-qaic-platform-sdk/customize.py index 5a68188bd5..17f2967ccf 100644 --- a/script/get-qaic-platform-sdk/customize.py +++ b/script/get-qaic-platform-sdk/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import xml.etree.ElementTree as et - +import logging def preprocess(i): os_info = i['os_info'] @@ -60,7 +60,7 @@ def detect_version(i): if not version: return {'return':1, 'error': f'qaic platform sdk version info not found'} - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-rclone/customize.py b/script/get-rclone/customize.py index 84804d6e59..0d2491d8b4 100644 --- a/script/get-rclone/customize.py +++ b/script/get-rclone/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import configparser - +import logging def preprocess(i): os_info = i['os_info'] @@ -48,7 +48,7 @@ def preprocess(i): env['CM_RCLONE_ARCHIVE'] = filename env['CM_RCLONE_ARCHIVE_WITH_EXT'] = filename+'.zip' - print(recursion_spaces + 'Downloading {}'.format(env['CM_RCLONE_URL'])) + logging.info(recursion_spaces + 'Downloading 
{}'.format(env['CM_RCLONE_URL'])) cur_dir = os.getcwd() path_bin = os.path.join(cur_dir, file_name) @@ -73,7 +73,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} @@ -101,7 +101,7 @@ def postprocess(i): with open(default_config_path, 'w') as configfile: default_config.write(configfile) - print({section: dict(default_config[section]) for section in default_config.sections()}) + logging.info({section: dict(default_config[section]) for section in default_config.sections()}) r = detect_version(i) diff --git a/script/get-rocm/customize.py b/script/get-rocm/customize.py index 667c29f4da..ef4647c78c 100644 --- a/script/get-rocm/customize.py +++ b/script/get-rocm/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -40,7 +40,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-sys-utils-cm/customize.py b/script/get-sys-utils-cm/customize.py index 893384b648..190518c9db 100644 --- a/script/get-sys-utils-cm/customize.py +++ b/script/get-sys-utils-cm/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -22,9 +22,9 @@ def preprocess(i): # Windows has moved to get-sys-utils-min and will be always run with "detect,os"! 
if os_info['platform'] == 'windows': - print ('') - print ('This script is not used on Windows') - print ('') + logging.info ('') + logging.info ('This script is not used on Windows') + logging.info ('') # If windows, download here otherwise use run.sh @@ -78,10 +78,10 @@ def preprocess(i): # env['+PATH']=[os.path.join(path, 'bin')] # else: - print ('') - print ('***********************************************************************') - print ('This script will attempt to install minimal system dependencies for CM.') - print ('Note that you may be asked for your SUDO password ...') - print ('***********************************************************************') + logging.info ('') + logging.info ('***********************************************************************') + logging.info ('This script will attempt to install minimal system dependencies for CM.') + logging.info ('Note that you may be asked for your SUDO password ...') + logging.info ('***********************************************************************') return {'return':0} diff --git a/script/get-sys-utils-min/customize.py b/script/get-sys-utils-min/customize.py index a8b9020c50..654f19dfc9 100644 --- a/script/get-sys-utils-min/customize.py +++ b/script/get-sys-utils-min/customize.py @@ -1,5 +1,6 @@ from cmind import utils import os +import logging def preprocess(i): @@ -21,22 +22,22 @@ def preprocess(i): for cd in clean_dirs.split(','): if cd != '': if os.path.isdir(cd): - print ('Clearning directory {}'.format(cd)) + logging.info ('Clearning directory {}'.format(cd)) shutil.rmtree(cd) url = env['CM_PACKAGE_WIN_URL'] urls = [url] if ';' not in url else url.split(';') - print ('') - print ('Current directory: {}'.format(os.getcwd())) + logging.info ('') + logging.info ('Current directory: {}'.format(os.getcwd())) for url in urls: url = url.strip() - print ('') - print ('Downloading from {}'.format(url)) + logging.info ('') + logging.info ('Downloading from {}'.format(url)) r = 
cm.access({'action':'download_file', 'automation':'utils,dc2743f8450541e3', @@ -45,7 +46,7 @@ def preprocess(i): filename = r['filename'] - print ('Unzipping file {}'.format(filename)) + logging.info ('Unzipping file {}'.format(filename)) r = cm.access({'action':'unzip_file', 'automation':'utils,dc2743f8450541e3', @@ -53,10 +54,10 @@ def preprocess(i): if r['return']>0: return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + logging.info ('Removing file {}'.format(filename)) os.remove(filename) - print ('') + logging.info ('') # Add to path env['+PATH']=[os.path.join(path, 'bin')] diff --git a/script/get-tensorrt/customize.py b/script/get-tensorrt/customize.py index b18fe35c3a..9cbe79a66a 100644 --- a/script/get-tensorrt/customize.py +++ b/script/get-tensorrt/customize.py @@ -1,7 +1,7 @@ from cmind import utils import os import tarfile - +import logging def preprocess(i): recursion_spaces = i['recursion_spaces'] @@ -90,7 +90,7 @@ def preprocess(i): return {'return': 1, 'error': 'Please envoke cmr "' + " ".join(tags) + '" --tar_file={full path to the TensorRT tar file}'} - print ('Untaring file - can take some time ...') + logging.info ('Untaring file - can take some time ...') file_name = "trtexec" my_tar = tarfile.open(os.path.expanduser(env['CM_TENSORRT_TAR_FILE_PATH'])) diff --git a/script/get-terraform/customize.py b/script/get-terraform/customize.py index c091322bc5..10f6b7e193 100644 --- a/script/get-terraform/customize.py +++ b/script/get-terraform/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -38,7 +38,7 @@ def detect_version(i): version = r['version'] - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} def postprocess(i): diff --git a/script/get-tvm-model/customize.py b/script/get-tvm-model/customize.py index 
26732a279c..c86b917ff6 100644 --- a/script/get-tvm-model/customize.py +++ b/script/get-tvm-model/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -29,7 +29,7 @@ def preprocess(i): "Error: the found workdir does not contain database_tuning_record.json") if env.get('CM_TUNE_TVM_MODEL', '') != '': - print("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. The compiled model will be based on the found existing \"work_dir\".") + logging.info("The \"tune-model\" variation is selected, but at the same time the path to the existing \"work_dir\" is also specified. The compiled model will be based on the found existing \"work_dir\".") env["CM_TUNE_TVM_MODEL"] = "no" diff --git a/script/get-tvm-model/process.py b/script/get-tvm-model/process.py index 53543e0f83..a3214dbad2 100644 --- a/script/get-tvm-model/process.py +++ b/script/get-tvm-model/process.py @@ -1,5 +1,6 @@ import os import tempfile +import logging from typing import Dict, Tuple, Optional, List, Any, Union if os.environ.get("CM_TVM_FRONTEND_FRAMEWORK", None) == "pytorch": @@ -58,7 +59,7 @@ def get_mod_params( raise RuntimeError( "Error: Cannot find proper shapes in environment variables" ) - print(f"Shape dict {shape_dict}") + logging.info(f"Shape dict {shape_dict}") if frontend == "pytorch": torch_model = getattr(torchvision.models, model_name)(weights=None) torch_model.load_state_dict(torch.load(model_path)) @@ -91,7 +92,7 @@ def tune_model( work_dir = os.path.join(os.getcwd(), "metaschedule_workdir") if not os.path.exists(work_dir): os.mkdir(work_dir) - print("Extracting tasks...") + logging.info("Extracting tasks...") extracted_tasks = meta_schedule.relay_integration.extract_tasks( mod, target, params ) @@ -99,7 +100,7 @@ def tune_model( extracted_tasks, work_dir, strategy="evolutionary" ) - print("Begin tuning...") + logging.info("Begin tuning...") 
evaluator_config = meta_schedule.runner.config.EvaluatorConfig( number=1, repeat=10, @@ -189,11 +190,11 @@ def serialize_vm( def main() -> None: model_path = os.environ.get('CM_ML_MODEL_FILE_WITH_PATH', None) compiled_model = os.path.join(os.getcwd(), 'model-tvm.so') - print('TVM model: ' + model_path) + logging.info('TVM model: ' + model_path) if model_path.endswith('.so') or model_path.endswith('.dylib'): compiled_model = model_path if not os.path.isfile(compiled_model): - print('') + logging.info('') raise RuntimeError( f"Error: Model file {compiled_model} not found!" ) @@ -246,7 +247,7 @@ def main() -> None: with open(os.path.join(os.getcwd(), "tvm_executor"), "w") as file: file.write("virtual_machine" if use_vm else "graph_executor") lib.export_library(compiled_model) - print('TVM compiled model: ' + compiled_model) + logging.info('TVM compiled model: ' + compiled_model) if __name__ == "__main__": main() diff --git a/script/gui/app.py b/script/gui/app.py index 0f4f93d21f..88fa009109 100644 --- a/script/gui/app.py +++ b/script/gui/app.py @@ -3,7 +3,7 @@ import streamlit as st import os import cmind - +import logging import misc def main(): @@ -30,7 +30,7 @@ def main(): if ' ' in script_tags: script_tags = script_tags.replace(' ',',') - print ('Searching CM scripts using tags "{}"'.format(script_tags)) + logging.info ('Searching CM scripts using tags "{}"'.format(script_tags)) r = cmind.access({'action':'find', 'automation':'script,5b4e0237da074764', diff --git a/script/gui/customize.py b/script/gui/customize.py index 9c920ab2c4..aa7b9f16b9 100644 --- a/script/gui/customize.py +++ b/script/gui/customize.py @@ -6,7 +6,7 @@ import json import shutil import subprocess - +import logging def preprocess(i): os_info = i['os_info'] @@ -23,7 +23,7 @@ def preprocess(i): if ' ' in script_tags: script_tags = script_tags.replace(' ',',') - print ('Searching CM scripts using tags "{}"'.format(script_tags)) + logging.info ('Searching CM scripts using tags 
"{}"'.format(script_tags)) r = cm.access({'action':'find', 'automation':'script', @@ -37,7 +37,7 @@ def preprocess(i): env['CM_GUI_SCRIPT_PATH'] = script.path env['CM_GUI_SCRIPT_ALIAS'] = script.meta['alias'] - print ('Script found in path {}'.format(script.path)) + logging.info ('Script found in path {}'.format(script.path)) env['CM_GUI_SCRIPT_TAGS'] = script_tags @@ -59,6 +59,6 @@ def preprocess(i): env['CM_GUI_EXTRA_CMD'] = extra_cmd - print ('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) + logging.info ('Prepared extra CMD for streamlit: {}'.format(extra_cmd)) return {'return':0} diff --git a/script/gui/script.py b/script/gui/script.py index 9a8bc0cfeb..e86fb2dfc9 100644 --- a/script/gui/script.py +++ b/script/gui/script.py @@ -3,7 +3,7 @@ import streamlit as st import os import cmind - +import logging import misc def page(i): @@ -460,10 +460,10 @@ def page(i): cmd2 = prefix + 'bash -c "{}"'.format(cli2) - print ('Running command:') - print ('') - print (' {}'.format(cmd2)) - print ('') + logging.info ('Running command:') + logging.info ('') + logging.info (' {}'.format(cmd2)) + logging.info ('') os.system(cmd2) diff --git a/script/gui/tests/generate_password.py b/script/gui/tests/generate_password.py index 145a46dbd3..7350583778 100644 --- a/script/gui/tests/generate_password.py +++ b/script/gui/tests/generate_password.py @@ -1,5 +1,5 @@ import bcrypt - +import logging #salt = bcrypt.gensalt() # TBD: temporal hack to demo password protection for experiments #salt = bcrypt.gensalt() @@ -9,5 +9,4 @@ password_salt = b'$2b$12$ionIRWe5Ft7jkn4y/7C6/e' password_hash2 = bcrypt.hashpw(pwd.encode('utf-8'), password_salt) - -print ('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) +logging.info ('"password_hash":"{}"'.format(password_hash2.decode('utf-8'))) diff --git a/script/import-mlperf-inference-to-experiment/customize.py b/script/import-mlperf-inference-to-experiment/customize.py index 486bc76d15..51dd8bf6d5 100644 --- 
a/script/import-mlperf-inference-to-experiment/customize.py +++ b/script/import-mlperf-inference-to-experiment/customize.py @@ -6,7 +6,7 @@ import csv import json import copy - +import logging file_summary = 'summary.csv' file_summary_json = 'mlperf-inference-summary-{}.json' @@ -64,10 +64,10 @@ def preprocess(i): skip_submission_checker = env.get('CM_SKIP_SUBMISSION_CHECKER','') in ['yes','True'] - print ('') - print ('Processing results in path: {}'.format(path)) - print ('Version: {}'.format(version)) - print ('') + logging.info ('') + logging.info ('Processing results in path: {}'.format(path)) + logging.info ('Version: {}'.format(version)) + logging.info ('') if skip_submission_checker: if not os.path.isfile(file_summary): @@ -76,7 +76,7 @@ def preprocess(i): if os.path.isfile(file_summary): os.remove(file_summary) - print ('* Running submission checker ...') + logging.info ('* Running submission checker ...') xenv = {} @@ -94,7 +94,7 @@ def preprocess(i): ii['env'] = xenv if version!='': - print (' Version detected from cache tags: {}'.format(version)) + logging.info (' Version detected from cache tags: {}'.format(version)) ii['version']=version r = cm.access(ii) @@ -103,11 +103,11 @@ def preprocess(i): return r if r['return']>0: - print ('') - print ('WARNING: script returned non-zero value - possible issue - please check!') - print ('') + logging.info ('') + logging.info ('WARNING: script returned non-zero value - possible issue - please check!') + logging.info ('') input ('Press Enter to continue') - print ('') + logging.info ('') r = convert_summary_csv_to_experiment(path, version, env) if r['return']>0: return r @@ -116,7 +116,7 @@ def preprocess(i): def convert_summary_csv_to_experiment(path, version, env): - print ('* Processing MLPerf repo in cache path: {}'.format(path)) + logging.info ('* Processing MLPerf repo in cache path: {}'.format(path)) cur_dir = os.getcwd() @@ -126,7 +126,7 @@ def convert_summary_csv_to_experiment(path, version, env): burl = 
subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print (' Git URL: {}'.format(url)) + logging.info (' Git URL: {}'.format(url)) os.chdir(cur_dir) @@ -224,9 +224,9 @@ def convert_summary_csv_to_experiment(path, version, env): target_repo='' if env_target_repo=='' else env_target_repo+':' - print ('') + logging.info ('') for name in experiment: - print (' Preparing experiment artifact "{}"'.format(name)) + logging.info (' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') if 'mlperf' not in tags: tags.insert(0, 'mlperf') diff --git a/script/import-mlperf-tiny-to-experiment/customize.py b/script/import-mlperf-tiny-to-experiment/customize.py index 8929cba8d7..57e4868873 100644 --- a/script/import-mlperf-tiny-to-experiment/customize.py +++ b/script/import-mlperf-tiny-to-experiment/customize.py @@ -4,7 +4,7 @@ import os import subprocess import json - +import logging file_summary_json = 'mlperf-inference-summary.json' file_result = 'cm-result.json' @@ -44,15 +44,15 @@ def preprocess(i): r = convert_repo_to_experiment(path, version, env) if r['return']>0: return r - print ('') + logging.info ('') return {'return':0} def convert_repo_to_experiment(path, version, env): - print ('') - print ('Processing MLPerf repo from CM cache path: {}'.format(path)) - print ('* Version: {}'.format(version)) + logging.info ('') + logging.info ('Processing MLPerf repo from CM cache path: {}'.format(path)) + logging.info ('* Version: {}'.format(version)) cur_dir = os.getcwd() @@ -62,7 +62,7 @@ def convert_repo_to_experiment(path, version, env): burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print ('* Git URL: {}'.format(url)) + logging.info ('* Git URL: {}'.format(url)) # Create virtual experiment entries experiments = {} @@ -70,14 +70,14 @@ def convert_repo_to_experiment(path, version, env): for division in ['closed', 'open']: p1 = 
os.path.join(path, division) if os.path.isdir(p1): - print (' * Processing division: {}'.format(division)) + logging.info (' * Processing division: {}'.format(division)) companies = os.listdir(p1) for company in companies: p2 = os.path.join (p1, company) if os.path.isdir(p2): - print (' * Processing company: {}'.format(company)) + logging.info (' * Processing company: {}'.format(company)) presults = os.path.join(p2, 'results') psystems = os.path.join(p2, 'systems') @@ -101,13 +101,13 @@ def convert_repo_to_experiment(path, version, env): for system in systems: psystem = os.path.join(presult, system) if os.path.isdir(psystem): - print (' * Processing result for system: {}'.format(system)) + logging.info (' * Processing result for system: {}'.format(system)) # Check system file psystem_desc = os.path.join(psystems, system+'.json') psystem_dict = {} - print (' File: {}'.format(psystem_desc)) + logging.info (' File: {}'.format(psystem_desc)) # Check exceptions if version == 'v1.0': @@ -169,13 +169,13 @@ def convert_repo_to_experiment(path, version, env): psystem_dict = r['meta'] else: - print (' * Warning: system description not found in {}'.format(psystem_desc)) + logging.info (' * Warning: system description not found in {}'.format(psystem_desc)) input (' Press to continue') for benchmark in os.listdir(psystem): pbenchmark = os.path.join(psystem, benchmark) if os.path.isdir(pbenchmark): - print (' * Processing benchmark: {}'.format(benchmark)) + logging.info (' * Processing benchmark: {}'.format(benchmark)) models = [''] @@ -198,7 +198,7 @@ def convert_repo_to_experiment(path, version, env): results = {} if model!='': - print (' * Processing model: {}'.format(model)) + logging.info (' * Processing model: {}'.format(model)) pbenchmark = os.path.join(psystem, benchmark, model) perf_file_type=0 @@ -235,7 +235,7 @@ def convert_repo_to_experiment(path, version, env): results['_Result']=median_throughput if median_throughput==0: - print (' * Warning: median_throughput was 
not detected in {}'.format(pperf)) + logging.info (' * Warning: median_throughput was not detected in {}'.format(pperf)) input (' Press to continue') r = utils.load_txt(paccuracy, split=True) @@ -270,11 +270,11 @@ def convert_repo_to_experiment(path, version, env): found = True if not found: - print (' * Warning: accuracy not found in the file {}'.format(paccuracy)) + logging.info (' * Warning: accuracy not found in the file {}'.format(paccuracy)) input (' Press to continue') else: - print (' * Warning: performance or accuracy files are not present in this submission') + logging.info (' * Warning: performance or accuracy files are not present in this submission') input (' Press to continue') if os.path.isfile(penergy): @@ -295,7 +295,7 @@ def convert_repo_to_experiment(path, version, env): results['median_energy_median_throughput_metric']='inf./sec.' if median_throughput==0: - print (' * Warning: median_throughput was not detected in {}'.format(penergy)) + logging.info (' * Warning: median_throughput was not detected in {}'.format(penergy)) input (' Press to continue') else: median_energy_cost=0 @@ -310,10 +310,10 @@ def convert_repo_to_experiment(path, version, env): results['median_energy_cost_metric']='uj/inf.' 
if median_energy_cost==0: - print (' * Warning: median_energy_cost was not detected in {}'.format(penergy)) + logging.info (' * Warning: median_energy_cost was not detected in {}'.format(penergy)) input (' Press to continue') - print (' * Results dict: {}'.format(results)) + logging.info (' * Results dict: {}'.format(results)) # Finalizing keys results.update(psystem_dict) @@ -337,7 +337,7 @@ def convert_repo_to_experiment(path, version, env): # Prepare experiment name cm_name = 'mlperf-tiny--{}--'+division+'--'+xbenchmark - print (' * CM experiment name: {}'.format(cm_name)) + logging.info (' * CM experiment name: {}'.format(cm_name)) name_all = cm_name.format('all') name_ver = cm_name.format(version) @@ -348,7 +348,7 @@ def convert_repo_to_experiment(path, version, env): else: - print (' * Warning: some directories are not present in this submission') + logging.info (' * Warning: some directories are not present in this submission') input (' Press to continue') os.chdir(cur_dir) @@ -360,9 +360,9 @@ def convert_repo_to_experiment(path, version, env): target_repo='' if env_target_repo=='' else env_target_repo+':' # Checking experiment - print ('') + logging.info ('') for name in experiments: - print (' Preparing experiment artifact "{}"'.format(name)) + logging.info (' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') if 'mlperf' not in tags: tags.insert(0, 'mlperf') diff --git a/script/import-mlperf-training-to-experiment/customize.py b/script/import-mlperf-training-to-experiment/customize.py index 19a69a6af8..f4d96e8441 100644 --- a/script/import-mlperf-training-to-experiment/customize.py +++ b/script/import-mlperf-training-to-experiment/customize.py @@ -1,6 +1,6 @@ import cmind as cm from cmind import utils - +import logging import os import subprocess import csv @@ -105,9 +105,9 @@ def preprocess(i): env['CM_MLPERF_TRAINING_CURRENT_DIR'] = cur_dir env['CM_MLPERF_TRAINING_REPO_VERSION'] = version - print ('') - print ('Repo path: 
{}'.format(path)) - print ('Repo version: {}'.format(version)) + logging.info ('') + logging.info ('Repo path: {}'.format(path)) + logging.info ('Repo version: {}'.format(version)) r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, @@ -122,7 +122,7 @@ def preprocess(i): def convert_summary_csv_to_experiment(path, version, env): - print ('* Processing MLPerf training results repo in cache path: {}'.format(path)) + logging.info ('* Processing MLPerf training results repo in cache path: {}'.format(path)) cur_dir = os.getcwd() @@ -132,7 +132,7 @@ def convert_summary_csv_to_experiment(path, version, env): burl = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) url = burl.decode('UTF-8').strip() - print (' Git URL: {}'.format(url)) + logging.info (' Git URL: {}'.format(url)) os.chdir(cur_dir) @@ -238,9 +238,9 @@ def convert_summary_csv_to_experiment(path, version, env): env_target_repo=env.get('CM_IMPORT_MLPERF_TRAINING_TARGET_REPO','').strip() target_repo='' if env_target_repo=='' else env_target_repo+':' - print ('') + logging.info ('') for name in experiment: - print (' Preparing experiment artifact "{}"'.format(name)) + logging.info (' Preparing experiment artifact "{}"'.format(name)) tags = name.split('--') if 'mlperf' not in tags: tags.insert(0, 'mlperf') diff --git a/script/install-bazel/customize.py b/script/install-bazel/customize.py index d656e40bac..a2887d132c 100644 --- a/script/install-bazel/customize.py +++ b/script/install-bazel/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -15,7 +15,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) # if 'CM_GIT_CHECKOUT' not in env: # 
env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version diff --git a/script/install-cmake-prebuilt/customize.py b/script/install-cmake-prebuilt/customize.py index 263e667c47..6788b728be 100644 --- a/script/install-cmake-prebuilt/customize.py +++ b/script/install-cmake-prebuilt/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -15,7 +15,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) version_split = need_version.split(".") while len(version_split) < 3: @@ -61,10 +61,10 @@ def preprocess(i): package_url = 'https://github.com/Kitware/CMake/releases/download/v' + need_version + '/' + package_name - print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + logging.info (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) - print ('') - print ('Downloading from {} ...'.format(package_url)) + logging.info ('') + logging.info ('Downloading from {} ...'.format(package_url)) cm = automation.cmind @@ -77,7 +77,7 @@ def preprocess(i): # Check what to do with this file depending on OS if os_info['platform'] == 'windows': - print ('Unzipping file {}'.format(filename)) + logging.info ('Unzipping file {}'.format(filename)) r = cm.access({'action':'unzip_file', 'automation':'utils,dc2743f8450541e3', @@ -86,7 +86,7 @@ def preprocess(i): if r['return']>0: return r if os.path.isfile(filename): - print ('Removing file {}'.format(filename)) + logging.info ('Removing file {}'.format(filename)) os.remove(filename) path_bin = os.path.join(os.getcwd(), 'bin') diff --git a/script/install-gcc-src/customize.py b/script/install-gcc-src/customize.py index caff463edc..4bff18e9cb 100644 --- a/script/install-gcc-src/customize.py +++ 
b/script/install-gcc-src/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -18,7 +18,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) if 'CM_GIT_CHECKOUT' not in env: env['CM_GIT_CHECKOUT'] = 'releases/gcc-' + need_version diff --git a/script/install-generic-conda-package/customize.py b/script/install-generic-conda-package/customize.py index 5f7905d592..a00d983f48 100644 --- a/script/install-generic-conda-package/customize.py +++ b/script/install-generic-conda-package/customize.py @@ -1,5 +1,6 @@ from cmind import utils import os +import logging import cmind as cm def preprocess(i): @@ -28,7 +29,7 @@ def preprocess(i): def detect_version(i): # TBD - print (i['recursion_spaces'] + ' Detected version: {}'.format(version)) + logging.info (i['recursion_spaces'] + ' Detected version: {}'.format(version)) return {'return':0, 'version':version} diff --git a/script/install-gflags/customize.py b/script/install-gflags/customize.py index 65872c79a0..58337ed180 100644 --- a/script/install-gflags/customize.py +++ b/script/install-gflags/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -18,7 +18,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) return {'return':0} diff --git a/script/install-llvm-prebuilt/customize.py b/script/install-llvm-prebuilt/customize.py index 1550c0ed9e..4d942423f3 100644 --- a/script/install-llvm-prebuilt/customize.py +++ 
b/script/install-llvm-prebuilt/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -16,7 +16,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) host_os_bits = env['CM_HOST_OS_BITS'] @@ -52,12 +52,12 @@ def preprocess(i): package_name = 'LLVM-' + need_version + '-win' + host_os_bits + '.exe' clang_file_name = "clang.exe" - print('') - print('WARNING: Please copy the following path and then paste it') - print(' when LLVM installer asks you about the "Destination Folder":') - print('') - print(os.getcwd()) - print('') + logging.info('') + logging.info('WARNING: Please copy the following path and then paste it') + logging.info(' when LLVM installer asks you about the "Destination Folder":') + logging.info('') + logging.info(os.getcwd()) + logging.info('') input('Press Enter to continue!') else: @@ -154,10 +154,10 @@ def preprocess(i): package_url = 'https://github.com/llvm/llvm-project/releases/download/llvmorg-' + need_version + '/' + package_name - print (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) + logging.info (recursion_spaces + ' # Prepared package URL: {}'.format(package_url)) - print ('') - print ('Downloading from {} ...'.format(package_url)) + logging.info ('') + logging.info ('Downloading from {} ...'.format(package_url)) cm = automation.cmind diff --git a/script/install-openssl/customize.py b/script/install-openssl/customize.py index e6163a0f5e..ea3ab29cd4 100644 --- a/script/install-openssl/customize.py +++ b/script/install-openssl/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -18,7 +18,7 @@ def preprocess(i): if need_version == '': return 
{'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) return {'return':0} diff --git a/script/install-python-src/customize.py b/script/install-python-src/customize.py index a7025a6cf2..f918cb1440 100644 --- a/script/install-python-src/customize.py +++ b/script/install-python-src/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -18,7 +18,7 @@ def preprocess(i): if need_version == '': return {'return':1, 'error':'internal problem - CM_VERSION is not defined in env'} - print (recursion_spaces + ' # Requested version: {}'.format(need_version)) + logging.info (recursion_spaces + ' # Requested version: {}'.format(need_version)) path_bin = os.path.join(os.getcwd(), 'install', 'bin') diff --git a/script/install-python-venv/customize.py b/script/install-python-venv/customize.py index 84fe4984ad..018cb284cb 100644 --- a/script/install-python-venv/customize.py +++ b/script/install-python-venv/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -21,7 +21,7 @@ def preprocess(i): name = env.get('CM_NAME','') if not quiet and name == '': - print ('') + logging.info ('') x = input('Enter some tag to describe this virtual env (mlperf-inf,octoml-bench,etc): ') x = x.strip() diff --git a/script/launch-benchmark/customize.py b/script/launch-benchmark/customize.py index 5db5e9f817..f193f13143 100644 --- a/script/launch-benchmark/customize.py +++ b/script/launch-benchmark/customize.py @@ -1,7 +1,7 @@ import cmind import os import copy - +import logging base_path={} base_path_meta={} @@ -94,7 +94,7 @@ def load_cfg(i): r = cmind.utils.load_yaml_and_json(full_path_without_ext) if r['return']>0: - print ('Warning: problem loading file {}'.format(full_path)) + 
logging.info ('Warning: problem loading file {}'.format(full_path)) else: meta = r['meta'] diff --git a/script/launch-benchmark/tests/debug.py b/script/launch-benchmark/tests/debug.py index 842003b2c6..57a99d0531 100644 --- a/script/launch-benchmark/tests/debug.py +++ b/script/launch-benchmark/tests/debug.py @@ -1,6 +1,6 @@ import cmind - +import logging r=cmind.access({'action':'gui', 'automation':'script', 'artifact':'launch benchmark'}) -print (r) +logging.info (r) diff --git a/script/preprocess-mlperf-inference-submission/customize.py b/script/preprocess-mlperf-inference-submission/customize.py index 03bca7cd9b..87fc1bde19 100644 --- a/script/preprocess-mlperf-inference-submission/customize.py +++ b/script/preprocess-mlperf-inference-submission/customize.py @@ -3,7 +3,7 @@ import os from os.path import exists import shutil - +import logging def preprocess(i): os_info = i['os_info'] @@ -11,7 +11,7 @@ def preprocess(i): submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") if submission_dir == "": - print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") + logging.info("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified'} submitter = env.get("CM_MLPERF_SUBMITTER", "cTuning") diff --git a/script/print-any-text/customize.py b/script/print-any-text/customize.py index 093cafdcff..4c31b8787b 100644 --- a/script/print-any-text/customize.py +++ b/script/print-any-text/customize.py @@ -2,7 +2,7 @@ from cmind import utils import os - +import logging def postprocess(i): env = i['env'] @@ -20,11 +20,11 @@ def postprocess(i): if kk!='': vv = e.get(kk) - print ('{}[{}]: {}'.format(t, kk, vv)) + logging.info ('{}[{}]: {}'.format(t, kk, vv)) printed = True if printed: - print ('') + logging.info ('') return {'return':0} diff --git a/script/print-croissant-desc/code.py b/script/print-croissant-desc/code.py index a475c5a6ec..511a2148bc 100644 --- a/script/print-croissant-desc/code.py +++ 
b/script/print-croissant-desc/code.py @@ -2,26 +2,26 @@ import os import mlcroissant as mlc - +import logging def main(): url = os.environ.get('CM_PRINT_CROISSANT_URL', '') if url=='': - print ('Error: --url is not specified') + logging.error ('Error: --url is not specified') exit(1) ds = mlc.Dataset(url) metadata = ds.metadata.to_json() - print ('') - print ('Croissant meta data URL: {}'.format(url)) - print ('') - print (f"{metadata['name']}: {metadata['description']}") + logging.info ('') + logging.info ('Croissant meta data URL: {}'.format(url)) + logging.info ('') + logging.info (f"{metadata['name']}: {metadata['description']}") - print ('') + logging.info ('') for x in ds.records(record_set="default"): - print(x) + logging.info(x) if __name__ == '__main__': main() diff --git a/script/print-hello-world-py/code.py b/script/print-hello-world-py/code.py index 735a890622..695e67858b 100644 --- a/script/print-hello-world-py/code.py +++ b/script/print-hello-world-py/code.py @@ -1,6 +1,7 @@ +import logging def main(): - print ('') - print ('HELLO WORLD from Python') + logging.info ('') + logging.info ('HELLO WORLD from Python') if __name__ == '__main__': main() diff --git a/script/process-ae-users/code.py b/script/process-ae-users/code.py index 6437eaa5e4..acf446ca6b 100644 --- a/script/process-ae-users/code.py +++ b/script/process-ae-users/code.py @@ -2,11 +2,11 @@ import csv import json import cmind - +import logging def main(): f = os.environ.get('CM_PROCESS_AE_USERS_INPUT_FILE','') - print ('Input CSV file: {}'.format(f)) + logging.info ('Input CSV file: {}'.format(f)) users = [] with open(f, 'r') as ff: @@ -15,7 +15,7 @@ def main(): if len(row)>0: users.append(row) - print ('') + logging.info ('') html = '
    \n' for user in sorted(users, key = lambda u: (u['last'].lower(), u['first'].lower())): @@ -23,7 +23,7 @@ def main(): name = full_name + ' ('+user['affiliation']+')' - print (name) + logging.info (name) html += '
  • '+name+'\n' @@ -36,7 +36,7 @@ def main(): lst = r['list'] if len(lst)==0: - print (' CM contributor not found!') + logging.info (' CM contributor not found!') meta = { 'challenges': [ @@ -47,7 +47,7 @@ def main(): 'organization': user['affiliation'] } - print (' Adding to mlcommons@ck ...') + logging.info (' Adding to mlcommons@ck ...') r = cmind.access({'out':'con', 'action':'add', 'automation':'contributor,68eae17b590d4f8f', # Need UID since using common function @@ -62,8 +62,8 @@ def main(): fo = f+'.html' - print ('') - print ('Saved HTML to {}'.format(fo)) + logging.info ('') + logging.info ('Saved HTML to {}'.format(fo)) cmind.utils.save_txt(fo, html) diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index 25f81a0921..ba56e6a066 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -1,7 +1,7 @@ from cmind import utils import cmind as cm import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -12,7 +12,7 @@ def preprocess(i): results_dir = env.get("CM_MLPERF_ACCURACY_RESULTS_DIR", "") if results_dir == "": - print("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") + logging.info("Please set CM_MLPERF_ACCURACY_RESULTS_DIR") return {'return':-1} # In fact, we expect only 1 command line here @@ -139,16 +139,16 @@ def postprocess(i): accuracy_file = os.path.join(result_dir, "accuracy.txt") if os.path.exists(accuracy_file): - print ('') - print ('Accuracy file: {}'.format(accuracy_file)) - print ('') + logging.info ('') + logging.info ('Accuracy file: {}'.format(accuracy_file)) + logging.info ('') x = '' with open(accuracy_file, "r") as fp: x=fp.read() if x!='': - print(x) + logging.info(x) # Trying to extract accuracy dict for y in x.split('\n'): @@ -164,6 +164,6 @@ def postprocess(i): except ValueError as e: pass - print ('') + logging.info ('') return {'return':0} diff --git a/script/prune-bert-models/customize.py 
b/script/prune-bert-models/customize.py index 34e0810231..5e07168d51 100644 --- a/script/prune-bert-models/customize.py +++ b/script/prune-bert-models/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -28,14 +28,14 @@ def preprocess(i): out_dir = os.path.join(os.getcwd(), 'pruned-model-output') env['CM_BERT_PRUNE_OUTPUT_DIR'] = out_dir - print ('') - print ('Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) + logging.info ('') + logging.info ('Local CM cache path to the updated BERT pruner src from NeurIPS 2022: ' + env['CM_GIT_REPO_BERT_PRUNER_NEURIPS_2022_CHECKOUT_PATH']) - print ('') + logging.info ('') for k in ["CM_ML_MODEL_FILE_WITH_PATH", "CM_BERT_PRUNE_CKPT_PATH", "CM_BERT_PRUNE_OUTPUT_DIR"]: - print ('ENV["{}"]: {}'.format(k, env[k])) + logging.info ('ENV["{}"]: {}'.format(k, env[k])) - print ('') + logging.info ('') return {'return': 0} @@ -43,6 +43,6 @@ def postprocess(i): env = i['env'] - print("Entered postprocess") + logging.info("Entered postprocess") return {'return': 0} diff --git a/script/publish-results-to-dashboard/code.py b/script/publish-results-to-dashboard/code.py index 1c9732fa06..81e6623ed1 100644 --- a/script/publish-results-to-dashboard/code.py +++ b/script/publish-results-to-dashboard/code.py @@ -1,14 +1,14 @@ # Developer: Grigori Fursin import os - +import logging def main(): # For now quick prototype hardwired to "summary.json" from MLPerf # Later need to clean it and make it universal - print ('') - print ('Reading summary.json ...') - print ('') + logging.info ('') + logging.info ('Reading summary.json ...') + logging.info ('') import json filename = os.environ.get('MLPERF_INFERENCE_SUBMISSION_SUMMARY','') @@ -22,9 +22,9 @@ def main(): f.close() - print ('=========================================================') - print ('Sending results to W&B dashboard ...') - print ('') + 
logging.info ('=========================================================') + logging.info ('Sending results to W&B dashboard ...') + logging.info ('') import wandb @@ -86,7 +86,7 @@ def main(): wandb.finish() - print ('=========================================================') + logging.info ('=========================================================') if __name__ == '__main__': main() diff --git a/script/push-csv-to-spreadsheet/google_api.py b/script/push-csv-to-spreadsheet/google_api.py index d1e7643aa4..33054e25ed 100644 --- a/script/push-csv-to-spreadsheet/google_api.py +++ b/script/push-csv-to-spreadsheet/google_api.py @@ -1,5 +1,5 @@ from __future__ import print_function - +import logging import os.path import os import csv @@ -48,7 +48,7 @@ def main(): request = service.spreadsheets().values().update(spreadsheetId=DOCUMENT_ID, range=sheet_name, valueInputOption="USER_ENTERED", body={"values": values}).execute() except HttpError as err: - print(err) + logging.info(err) if __name__ == '__main__': diff --git a/script/reproduce-ipol-paper-2022-439/customize.py b/script/reproduce-ipol-paper-2022-439/customize.py index 6b57ab932f..846f1ce736 100644 --- a/script/reproduce-ipol-paper-2022-439/customize.py +++ b/script/reproduce-ipol-paper-2022-439/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -27,8 +27,8 @@ def preprocess(i): def postprocess(i): - print ('') - print ('Please check "diff.png"') - print ('') + logging.info ('') + logging.info ('Please check "diff.png"') + logging.info ('') return {'return':0} diff --git a/script/reproduce-micro-paper-2023-victima/main.py b/script/reproduce-micro-paper-2023-victima/main.py index d851f1450f..e25378c0c1 100644 --- a/script/reproduce-micro-paper-2023-victima/main.py +++ b/script/reproduce-micro-paper-2023-victima/main.py @@ -1,10 +1,10 @@ import os - +import logging if __name__ == "__main__": - print ('') - print ('Main script:') - print 
('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) - print ('') + logging.info ('') + logging.info ('Main script:') + logging.info ('Experiment: {}'.format(os.environ.get('CM_EXPERIMENT',''))) + logging.info ('') exit(0) diff --git a/script/run-all-mlperf-models/customize.py b/script/run-all-mlperf-models/customize.py index 40f0fced40..79f8f37b3d 100644 --- a/script/run-all-mlperf-models/customize.py +++ b/script/run-all-mlperf-models/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -24,7 +24,7 @@ def preprocess(i): if devices: devices = devices.split(",") - print(backends) + logging.info(backends) implementation = env['IMPLEMENTATION'] power = env.get('POWER', '') @@ -88,7 +88,7 @@ def preprocess(i): run_script_content += "\n\n" +"\n\n".join(cmds) with open(os.path.join(script_path, run_file_name+".sh"), 'w') as f: f.write(run_script_content) - print(cmds) + logging.info(cmds) diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index 1d39bb4056..16b7997388 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -3,7 +3,7 @@ import os import subprocess from os.path import exists - +import logging def preprocess(i): os_info = i['os_info'] @@ -49,11 +49,11 @@ def preprocess(i): else: CMD += " 2> /dev/null" - print ('') - print ('Checking Docker images:') - print ('') - print (' '+CMD) - print ('') + logging.info ('') + logging.info ('Checking Docker images:') + logging.info ('') + logging.info (' '+CMD) + logging.info ('') try: docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8") @@ -64,7 +64,7 @@ def preprocess(i): if recreate_image != 'yes': if docker_image: - print("Docker image exists with ID: " + docker_image) + logging.info("Docker image exists with ID: " + docker_image) env['CM_DOCKER_IMAGE_EXISTS'] = "yes" # elif recreate_image == "yes": @@ -173,19 +173,19 @@ 
def postprocess(i): CONTAINER="docker run -dt "+ run_opts + " --rm " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag + " bash" CMD = "ID=`" + CONTAINER + "` && docker exec $ID bash -c '" + run_cmd + "' && docker kill $ID >/dev/null" - print ('=========================') - print ("Container launch command:") - print ('') - print (CMD) - print ('') - print ("Running "+run_cmd+" inside docker container") + logging.info ('=========================') + logging.info ("Container launch command:") + logging.info ('') + logging.info (CMD) + logging.info ('') + logging.info ("Running "+run_cmd+" inside docker container") record_script({'cmd':CMD, 'env': env}) - print ('') + logging.info ('') docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8") - print(docker_out) + logging.info(docker_out) else: x = "'" @@ -202,14 +202,14 @@ def postprocess(i): CONTAINER="docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag CMD = CONTAINER + " bash -c " + x + run_cmd + x2 + x - print ('') - print ("Container launch command:") - print ('') - print (CMD) + logging.info ('') + logging.info ("Container launch command:") + logging.info ('') + logging.info (CMD) record_script({'cmd':CMD, 'env': env}) - print ('') + logging.info ('') docker_out = os.system(CMD) return {'return':0} diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index 861207c29b..6c86477a8a 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -6,7 +6,7 @@ import cmind as cm import copy from tabulate import tabulate - +import logging summary_ext = ['.csv', '.json', '.xlsx'] ################################################################################## @@ -70,7 +70,7 @@ def preprocess(i): if env.get('CM_RUN_STYLE', '') == "valid" and 'CM_RUN_MLPERF_ACCURACY' not in env: 
env['CM_RUN_MLPERF_ACCURACY'] = "on" - print("Using MLCommons Inference source from " + env['CM_MLPERF_INFERENCE_SOURCE']) + logging.info("Using MLCommons Inference source from " + env['CM_MLPERF_INFERENCE_SOURCE']) if 'CM_MLPERF_LOADGEN_EXTRA_OPTIONS' not in env: @@ -165,12 +165,12 @@ def preprocess(i): if clean: path_to_clean = output_dir - print ('=========================================================') - print ('Cleaning results in {}'.format(path_to_clean)) + logging.info ('=========================================================') + logging.info ('Cleaning results in {}'.format(path_to_clean)) if os.path.exists(path_to_clean): shutil.rmtree(path_to_clean) - print ('=========================================================') + logging.info ('=========================================================') if str(env.get('CM_MLPERF_USE_DOCKER', '')).lower() in [ "1", "true", "yes"]: action = "docker" @@ -209,7 +209,7 @@ def preprocess(i): for mode in env['CM_MLPERF_LOADGEN_MODES']: env['CM_MLPERF_LOADGEN_MODE'] = mode - print(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") + logging.info(f"\nRunning loadgen scenario: {scenario} and mode: {mode}") ii = {'action':action, 'automation':'script', 'tags': scenario_tags, 'quiet': 'true', 'env': copy.deepcopy(env), 'input': inp, 'state': state, 'add_deps': copy.deepcopy(add_deps), 'add_deps_recursive': copy.deepcopy(add_deps_recursive), 'ad': ad, 'adr': copy.deepcopy(adr), 'v': verbose, 'print_env': print_env, 'print_deps': print_deps, 'dump_version_info': dump_version_info} @@ -247,11 +247,11 @@ def preprocess(i): # Better to do this in a stand alone CM script with proper deps but currently we manage this by modifying the sys path of the python executing CM import mlperf_utils - print(sut) + logging.info(sut) result_table, headers = mlperf_utils.get_result_table(state["cm-mlperf-inference-results"][sut]) - print(tabulate(result_table, headers = headers, tablefmt="pretty")) + 
logging.info(tabulate(result_table, headers = headers, tablefmt="pretty")) - print(f"\nThe MLPerf inference results are stored at {output_dir}\n") + logging.info(f"\nThe MLPerf inference results are stored at {output_dir}\n") return {'return':0} @@ -280,7 +280,7 @@ def get_valid_scenarios(model, category, mlperf_version, mlperf_path): valid_scenarios = config[mlperf_version]["required-scenarios-"+category][internal_model_name] - print("Valid Scenarios for " + model + " in " + category + " category are :" + str(valid_scenarios)) + logging.info("Valid Scenarios for " + model + " in " + category + " category are :" + str(valid_scenarios)) return valid_scenarios @@ -295,10 +295,10 @@ def postprocess(i): x2 = env.get('CM_MLPERF_INFERENCE_CONF_PATH','') if x1 != '' and x2 != '': - print ('') - print ('Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) - print ('Path to the MLPerf inference reference configuration file: {}'.format(x2)) - print ('') + logging.info ('') + logging.info ('Path to the MLPerf inference benchmark reference sources: {}'.format(x1)) + logging.info ('Path to the MLPerf inference reference configuration file: {}'.format(x2)) + logging.info ('') return {'return':0} diff --git a/script/run-mlperf-inference-app/run_mobilenet.py b/script/run-mlperf-inference-app/run_mobilenet.py index b5259168a2..4e190d2029 100644 --- a/script/run-mlperf-inference-app/run_mobilenet.py +++ b/script/run-mlperf-inference-app/run_mobilenet.py @@ -1,7 +1,7 @@ import cmind import os import sys - +import logging models = { "mobilenet": { "v1": { @@ -96,10 +96,10 @@ } } } - print(cm_input) + logging.info(cm_input) r = cmind.access(cm_input) if r['return'] > 0: - print(r) + logging.info(r) #exit(1) diff --git a/script/run-mlperf-inference-mobilenet-models/customize.py b/script/run-mlperf-inference-mobilenet-models/customize.py index ace19a6fd8..593b330ec7 100644 --- a/script/run-mlperf-inference-mobilenet-models/customize.py +++ 
b/script/run-mlperf-inference-mobilenet-models/customize.py @@ -2,7 +2,7 @@ import os import cmind import sys - +import logging def preprocess(i): @@ -175,7 +175,7 @@ def preprocess(i): if env.get('CM_MLPERF_ACCURACY_MODE','') == "yes": cm_input['mode'] = 'accuracy' - print(cm_input) + logging.info(cm_input) r = cmind.access(cm_input) if r['return'] > 0: return r @@ -183,7 +183,7 @@ def preprocess(i): if env.get('CM_MLPERF_PERFORMANCE_MODE','') == "yes": cm_input['mode'] = 'performance' - print(cm_input) + logging.info(cm_input) r = cmind.access(cm_input) if r['return'] > 0: return r diff --git a/script/run-mlperf-inference-submission-checker/code.py b/script/run-mlperf-inference-submission-checker/code.py index 892d16be33..948f2967af 100644 --- a/script/run-mlperf-inference-submission-checker/code.py +++ b/script/run-mlperf-inference-submission-checker/code.py @@ -2,26 +2,26 @@ import os import pandas - +import logging def main(): - print ('=========================================================') + logging.info ('=========================================================') - print ('Searching for summary.csv ...') + logging.info ('Searching for summary.csv ...') if os.path.isfile('summary.csv'): - print ('Converting to json ...') + logging.info ('Converting to json ...') import pandas df = pandas.read_csv('summary.csv').T - print ('') - print (df) - print ('') + logging.info ('') + logging.info (df) + logging.info ('') df.to_json('summary.json', orient='columns', indent=4) - print ('=========================================================') + logging.info ('=========================================================') if __name__ == '__main__': main() diff --git a/script/run-mlperf-power-server/customize.py b/script/run-mlperf-power-server/customize.py index 65c7830420..d04984d3c1 100644 --- a/script/run-mlperf-power-server/customize.py +++ b/script/run-mlperf-power-server/customize.py @@ -2,7 +2,7 @@ import cmind as cm import os import configparser - +import 
logging def preprocess(i): os_info = i['os_info'] @@ -18,7 +18,7 @@ def preprocess(i): config['ptd']['devicePort'] = env['CM_MLPERF_POWER_DEVICE_PORT'] with open('power-server.conf', 'w') as configfile: config.write(configfile) - print({section: dict(config[section]) for section in config.sections()}) + logging.info({section: dict(config[section]) for section in config.sections()}) if env['CM_HOST_OS_TYPE'] == "windows": cmd_prefix = "" diff --git a/script/run-terraform/customize.py b/script/run-terraform/customize.py index eeddbff60d..599cdaf5ba 100644 --- a/script/run-terraform/customize.py +++ b/script/run-terraform/customize.py @@ -3,7 +3,7 @@ import os import shutil import json - +import logging def preprocess(i): os_info = i['os_info'] @@ -13,7 +13,7 @@ def preprocess(i): env['CM_TERRAFORM_CONFIG_DIR'] = config_dir cache_dir = os.getcwd() - print(f"Running terraform from {cache_dir}") + logging.info(f"Running terraform from {cache_dir}") shutil.copy(os.path.join(config_dir, "main.tf"), cache_dir) env['CM_TERRAFORM_RUN_DIR'] = cache_dir @@ -84,4 +84,4 @@ def postprocess(i): def print_attr(instance_attributes, key): if key in instance_attributes: - print(key.upper() + ": " + str(instance_attributes[key])) + logging.info(key.upper() + ": " + str(instance_attributes[key])) diff --git a/script/set-venv/customize.py b/script/set-venv/customize.py index a8517a366e..6f695d5e6d 100644 --- a/script/set-venv/customize.py +++ b/script/set-venv/customize.py @@ -1,6 +1,6 @@ from cmind import utils import os - +import logging def preprocess(i): os_info = i['os_info'] @@ -46,9 +46,9 @@ def preprocess(i): cmd = python_path + ' -m venv ' + name + create_dir.format(name) - print ('====================================================================') + logging.info ('====================================================================') - print ('Creating venv: "{}" ...'.format(cmd)) + logging.info ('Creating venv: "{}" ...'.format(cmd)) os.system(cmd) @@ -81,11 +81,11 @@ def 
preprocess(i): with open(script_file, 'w') as f: f.write(cmd) - print ('====================================================================') - print ('Please run the following command:') - print ('') - print (xcmd) - print ('====================================================================') + logging.info ('====================================================================') + logging.info ('Please run the following command:') + logging.info ('') + logging.info (xcmd) + logging.info ('====================================================================') return {'return':0} diff --git a/script/tar-my-folder/customize.py b/script/tar-my-folder/customize.py index d405e76fdb..c4c545f7e2 100644 --- a/script/tar-my-folder/customize.py +++ b/script/tar-my-folder/customize.py @@ -3,7 +3,7 @@ import os import subprocess from os.path import exists - +import logging def preprocess(i): os_info = i['os_info'] @@ -22,8 +22,8 @@ def preprocess(i): input_path = Path(input_dir) cd_dir = input_path.parent.absolute() CMD = 'tar --directory '+str(cd_dir)+' -czf ' + os.path.join(output_dir, output_file) + ' ' + input_dirname - print(CMD) + logging.info(CMD) ret = os.system(CMD) - print("Tar file "+os.path.join(output_dir, output_file)+ " created") + logging.info("Tar file "+os.path.join(output_dir, output_file)+ " created") return {'return':ret} diff --git a/script/test-cm-core/src/test_cm.py b/script/test-cm-core/src/test_cm.py index 41fb402c22..cb3ea2c5c3 100644 --- a/script/test-cm-core/src/test_cm.py +++ b/script/test-cm-core/src/test_cm.py @@ -1,3 +1,4 @@ +import logging try: import cmind as cm @@ -9,6 +10,6 @@ except ImportError as e: from sys import stderr from subprocess import call - print('WARNING: CM module for python is not installed & jupyter notebooks will not be supported', file=stderr) + logging.warning('WARNING: CM module for python is not installed & jupyter notebooks will not be supported') retcode = call(['cm', 'test', 'script']) exit(retcode) 
diff --git a/script/test-cm-core/src/test_search_speed.py b/script/test-cm-core/src/test_search_speed.py index 3086a83408..c140e19df3 100644 --- a/script/test-cm-core/src/test_search_speed.py +++ b/script/test-cm-core/src/test_search_speed.py @@ -1,11 +1,11 @@ import cmind as cm import time - +import logging times = [] steps = 10 -print ('Running search with tags {} times ...'.format(steps)) +logging.info ('Running search with tags {} times ...'.format(steps)) for step in range(steps): @@ -22,4 +22,4 @@ step = 0 for t in times: step += 1 - print ("{}) {:0.3f} sec.".format(step, t)) + logging.info ("{}) {:0.3f} sec.".format(step, t)) diff --git a/script/test-cm-script-pipeline/customize.py b/script/test-cm-script-pipeline/customize.py index 89311a22fa..bc13dd856a 100644 --- a/script/test-cm-script-pipeline/customize.py +++ b/script/test-cm-script-pipeline/customize.py @@ -2,12 +2,12 @@ from cmind import utils import os - +import logging def preprocess(i): - print ('') - print ('customize.py: preprocess') - print ('') + logging.info ('') + logging.info ('customize.py: preprocess') + logging.info ('') return {'return':0} @@ -17,9 +17,9 @@ def postprocess(i): run_script_input = i['run_script_input'] env = i['env'] - print ('') - print ('customize.py: postprocess') - print ('') + logging.info ('') + logging.info ('customize.py: postprocess') + logging.info ('') r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'run2'}) if r['return']>0: @@ -30,9 +30,9 @@ def postprocess(i): def detect_version(i): - print ('') - print ('customize.py: detect_version') - print ('') + logging.info ('') + logging.info ('customize.py: detect_version') + logging.info ('') return {'return':0} diff --git a/script/test-debug/_demo.py b/script/test-debug/_demo.py index 781bed321b..bab8c3cf4e 100644 --- a/script/test-debug/_demo.py +++ b/script/test-debug/_demo.py @@ -2,8 +2,8 @@ import cmind import sys - -print(sys.executable) +import logging 
+logging.info(sys.executable) r = cmind.access('run "cm-debug"') -print(r) +logging.info(r) diff --git a/script/test-debug/customize.py b/script/test-debug/customize.py index a3af87f648..2e6a824253 100644 --- a/script/test-debug/customize.py +++ b/script/test-debug/customize.py @@ -1,27 +1,27 @@ # Developer(s): Grigori Fursin import os - +import logging def preprocess(i): os_info = i['os_info'] env = i['env'] meta = i['meta'] - print ("********************************************************") - print ('- Importing CM library ...') + logging.info ("********************************************************") + logging.info ('- Importing CM library ...') import cmind - print (' SUCCESS!') + logging.info (' SUCCESS!') cmind.utils.debug_here(__file__, port=5678, text='Debugging customize.py!', env=env, env_debug_uid='8d96cd9fa4734204').breakpoint() - print ('') - print ('- List CM repos ...') - print ('') + logging.info ('') + logging.info ('- List CM repos ...') + logging.info ('') r = cmind.access({'action':'show', 'automation':'repo', 'out':'con'}) - print ('') - print (' SUCCESS!') - print ("********************************************************") + logging.info ('') + logging.info (' SUCCESS!') + logging.info ("********************************************************") return {'return':0} diff --git a/script/test-debug/python/main.py b/script/test-debug/python/main.py index 0dfdf30f56..fce122be68 100644 --- a/script/test-debug/python/main.py +++ b/script/test-debug/python/main.py @@ -3,23 +3,23 @@ # Developer(s): Grigori Fursin """ - +import logging import os import json -print ("Hello World 1") +logging.info ("Hello World 1") env = os.environ import json -print ('') -print (json.dumps(dict(env), indent=2)) +logging.info ('') +logging.info (json.dumps(dict(env), indent=2)) # Import cmind to test break points import cmind.utils if os.environ.get('CM_TMP_DEBUG_UID', '') == '45a7c3a500d24a63': cmind.utils.debug_here(__file__, port=5678, text='Debugging 
main.py!').breakpoint() -print ('') -print ("Hello World 2") +logging.info ('') +logging.info ("Hello World 2") diff --git a/script/truncate-mlperf-inference-accuracy-log/customize.py b/script/truncate-mlperf-inference-accuracy-log/customize.py index d13d504ff8..fcc5978153 100644 --- a/script/truncate-mlperf-inference-accuracy-log/customize.py +++ b/script/truncate-mlperf-inference-accuracy-log/customize.py @@ -3,7 +3,7 @@ import os import subprocess from os.path import exists - +import logging def preprocess(i): os_info = i['os_info'] @@ -11,7 +11,7 @@ def preprocess(i): submission_dir = env.get("CM_MLPERF_INFERENCE_SUBMISSION_DIR", "") if submission_dir == "": - print("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") + logging.info("Please set CM_MLPERF_INFERENCE_SUBMISSION_DIR") return {'return': 1, 'error':'CM_MLPERF_INFERENCE_SUBMISSION_DIR is not specified in env in run-mlperf-accuracy-log-truncator'} submitter = env.get("CM_MLPERF_SUBMITTER", "CTuning") diff --git a/setup.py b/setup.py index 45f577ed51..ab3d300087 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ import importlib.util import platform import os +import logging class CustomInstallCommand(install): def run(self): @@ -53,7 +54,7 @@ def install_system_packages(self): subprocess.check_call(['sudo', 'apt-get', 'update']) subprocess.check_call(['sudo', 'apt-get', 'install', '-y'] + packages) elif self.system == 'Windows': - print(f"Please install the following packages manually: {packages}") + logging.info(f"Please install the following packages manually: {packages}") @@ -98,7 +99,7 @@ def custom_function(self): import cmind #r = cmind.access({'action':'rm', 'automation':'repo', 'data_uoa':'mlcommons@cm4mlops', 'force': True}) r = cmind.access({'action':'pull', 'automation':'repo', 'artifact':'mlcommons@cm4mlops', 'branch': 'mlperf-inference'}) - print(r) + logging.debug(r) if r['return'] > 0: return r['return'] diff --git a/tests/test_cm.py b/tests/test_cm.py index 41fb402c22..cb3ea2c5c3 100644 
--- a/tests/test_cm.py +++ b/tests/test_cm.py @@ -1,3 +1,4 @@ +import logging try: import cmind as cm @@ -9,6 +10,6 @@ except ImportError as e: from sys import stderr from subprocess import call - print('WARNING: CM module for python is not installed & jupyter notebooks will not be supported', file=stderr) + logging.warning('WARNING: CM module for python is not installed & jupyter notebooks will not be supported') retcode = call(['cm', 'test', 'script']) exit(retcode) diff --git a/tests/test_search_speed.py b/tests/test_search_speed.py index 3086a83408..c140e19df3 100644 --- a/tests/test_search_speed.py +++ b/tests/test_search_speed.py @@ -1,11 +1,11 @@ import cmind as cm import time - +import logging times = [] steps = 10 -print ('Running search with tags {} times ...'.format(steps)) +logging.info ('Running search with tags {} times ...'.format(steps)) for step in range(steps): @@ -22,4 +22,4 @@ step = 0 for t in times: step += 1 - print ("{}) {:0.3f} sec.".format(step, t)) + logging.info ("{}) {:0.3f} sec.".format(step, t)) From 273e8cd5bdac280bc915150e9062d4c60af63826 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 29 Jun 2024 22:53:31 +0100 Subject: [PATCH 2/3] Update module.py --- automation/script/module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/script/module.py b/automation/script/module.py index 0860ee70d7..79cc89e070 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -3792,7 +3792,7 @@ def find_artifact(self, i): if extra_paths[extra_path] not in env: env[extra_paths[extra_path]] = [] env[extra_paths[extra_path]].append(epath) - logging.info () + logging.info (recursion_spaces + ' # Found artifact in {}'.format(file_path)) if env_path_key != '': From ed7482363dc3571188f92cdd4e2aae4f66ae024a Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sat, 29 Jun 2024 23:02:04 +0100 Subject: [PATCH 3/3] Update customize.py --- script/get-onnxruntime-prebuilt/customize.py | 2 -- 1 file 
changed, 2 deletions(-) diff --git a/script/get-onnxruntime-prebuilt/customize.py b/script/get-onnxruntime-prebuilt/customize.py index 14996f0869..05025a7e0e 100644 --- a/script/get-onnxruntime-prebuilt/customize.py +++ b/script/get-onnxruntime-prebuilt/customize.py @@ -29,9 +29,7 @@ def preprocess(i): URL = 'https://github.com/microsoft/onnxruntime/releases/download/v{}/{}'.format(version, FILENAME) - logging.infot ('') logging.info ('Downloading from {}'.format(URL)) - logging.info ('') env['FOLDER'] = FOLDER env['FILENAME'] = FILENAME