diff --git a/cm-mlops/automation/docs/_cm.json b/cm-mlops/automation/docs/_cm.json
new file mode 100644
index 0000000000..6945baccaf
--- /dev/null
+++ b/cm-mlops/automation/docs/_cm.json
@@ -0,0 +1,9 @@
+{
+ "alias": "docs",
+ "automation_alias": "automation",
+ "automation_uid": "bbeb15d8f0a944a4",
+ "tags": [
+ "automation"
+ ],
+ "uid": "9558c9e6ca124065"
+}
diff --git a/cm-mlops/automation/docs/module.py b/cm-mlops/automation/docs/module.py
new file mode 100644
index 0000000000..be8d6e7b1d
--- /dev/null
+++ b/cm-mlops/automation/docs/module.py
@@ -0,0 +1,52 @@
+import os
+
+from cmind.automation import Automation
+from cmind import utils
+
+class CAutomation(Automation):
+ """
+ Automation actions
+ """
+
+ ############################################################
+ def __init__(self, cmind, automation_file):
+ super().__init__(cmind, __file__)
+
+ ############################################################
+ def test(self, i):
+ """
+ Test automation
+
+ Args:
+ (CM input dict):
+
+ (out) (str): if 'con', output to console
+
+ automation (str): automation as CM string object
+
+ parsed_automation (list): prepared in CM CLI or CM access function
+ [ (automation alias, automation UID) ] or
+ [ (automation alias, automation UID), (automation repo alias, automation repo UID) ]
+
+ (artifact) (str): artifact as CM string object
+
+ (parsed_artifact) (list): prepared in CM CLI or CM access function
+ [ (artifact alias, artifact UID) ] or
+ [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
+
+ ...
+
+ Returns:
+ (CM return dict):
+
+ * return (int): return code == 0 if no error and >0 if error
+ * (error) (str): error string if return>0
+
+ * Output from this automation action
+
+ """
+
+ import json
+ print (json.dumps(i, indent=2))
+
+ return {'return':0}
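
A minimal usage sketch (not part of the patch): the new `docs` automation's `test` action can be reached through the `cmind` Python API, assuming the repository containing `cm-mlops/automation/docs` has been pulled; the action simply echoes its input dictionary as JSON.

```python
import cmind

# Call the "test" action of the new "docs" automation by alias,UID
# (values taken from _cm.json above); the action prints its input as JSON.
r = cmind.access({'action': 'test',
                  'automation': 'docs,9558c9e6ca124065',
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```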
diff --git a/cm-mlops/automation/experiment/README-extra.md b/cm-mlops/automation/experiment/README-extra.md
index b209b9e1c1..c098acc14e 100644
--- a/cm-mlops/automation/experiment/README-extra.md
+++ b/cm-mlops/automation/experiment/README-extra.md
@@ -37,7 +37,7 @@ The goal is to provide a common interface to run, record, share, visualize and r
on any platform with any software, hardware and data.
The community helped us test a prototype of our "experiment" automation to record results in a unified CM format
-from [several MLPerf benchmarks](https://github.com/mlcommons/ck_mlperf_results)
+from [several MLPerf benchmarks](https://github.com/mlcommons/cm4mlperf-results)
including [MLPerf inference](https://github.com/mlcommons/inference) and [MLPerf Tiny](https://github.com/mlcommons/tiny),
visualize them at the [MLCommons CM platform](https://access.cknowledge.org/playground/?action=experiments&tags=all),
and improve them by the community via [public benchmarking, optimization and reproducibility challenges](https://access.cknowledge.org/playground/?action=challenges).
diff --git a/cm-mlops/automation/script/README-specs.md b/cm-mlops/automation/script/README-specs.md
index daeafc2062..58526d1687 100644
--- a/cm-mlops/automation/script/README-specs.md
+++ b/cm-mlops/automation/script/README-specs.md
@@ -55,7 +55,7 @@ Sometimes it is difficult to add all variations needed for a script like say `ba
* `--input` is automatically converted to `CM_INPUT` env key
* `version` is converted to `CM_VERSION`, ``version_min` to `CM_VERSION_MIN` and `version_max` to `CM_VERSION_MAX`
* If `env['CM_GH_TOKEN']=TOKEN_VALUE` is set then git URLs (specified by `CM_GIT_URL`) are changed to add this token.
-* If `env['CM_GIT_SSH']=yes`, then git URLs are chnged to SSH from HTTPS.
+* If `env['CM_GIT_SSH']=yes`, then git URLs are changed to SSH from HTTPS.
### Script Meta
#### Special keys in script meta
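
For illustration only (a sketch; `get,git,repo` is an assumed tag set, and any script that clones via `CM_GIT_URL` behaves the same), the SSH switch described above can be passed as an `env` key from Python:

```python
import cmind

# Ask CM to rewrite git URLs (CM_GIT_URL) from HTTPS to SSH for this run.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'get,git,repo',        # illustrative tags
                  'env': {'CM_GIT_SSH': 'yes'},
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```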
diff --git a/cm-mlops/automation/script/module.py b/cm-mlops/automation/script/module.py
index 88710fed07..22ff9c5da3 100644
--- a/cm-mlops/automation/script/module.py
+++ b/cm-mlops/automation/script/module.py
@@ -43,7 +43,7 @@ def __init__(self, cmind, automation_file):
self.tmp_file_run_env = 'tmp-run-env.out'
self.tmp_file_ver = 'tmp-ver.out'
- self.__version__ = "1.1.6"
+ self.__version__ = "1.2.1"
self.local_env_keys = ['CM_VERSION',
'CM_VERSION_MIN',
@@ -155,7 +155,8 @@ def run(self, i):
(verbose) (bool): if True, prints all tech. info about script execution (False by default)
(v) (bool): the same as verbose
- (time) (bool): if True, print script execution time (on if verbose == True)
+ (time) (bool): if True, print script execution time (also printed if verbose == True)
+ (space) (bool): if True, print disk space used by this script
(ignore_script_error) (bool): if True, ignore error code in native tools and scripts
and finish a given CM script. Useful to test/debug partial installations
@@ -172,6 +173,7 @@ def run(self, i):
(repro_prefix) (str): if !='', use it to record above files {repro-prefix)-input.json ...
(repro_dir) (str): if !='', use this directory to dump info
+ (script_call_prefix) (str): how to refer to this script in logs and READMEs ('cm run script' by default)
...
Returns:
@@ -201,6 +203,7 @@ def _run(self, i):
from cmind import utils
import copy
import time
+ import shutil
# Check if save input/output to file
repro = i.get('repro', False)
@@ -222,8 +225,9 @@ def _run(self, i):
recursion = i.get('recursion', False)
# If first script run, check if can write to current directory
- if not recursion and not can_write_to_current_directory():
- return {'return':1, 'error':'Current directory "{}" is not writable - please change it'.format(os.getcwd())}
+ if not recursion and not i.get('skip_write_test', False):
+ if not can_write_to_current_directory():
+ return {'return':1, 'error':'Current directory "{}" is not writable - please change it'.format(os.getcwd())}
recursion_int = int(i.get('recursion_int',0))+1
@@ -308,6 +312,10 @@ def _run(self, i):
env['CM_VERBOSE']='yes'
show_time = i.get('time', False)
+ show_space = i.get('space', False)
+
+ if not recursion and show_space:
+ start_disk_stats = shutil.disk_usage("/")
extra_recursion_spaces = ' '# if verbose else ''
@@ -470,7 +478,9 @@ def _run(self, i):
# print (recursion_spaces + '* Running ' + cm_script_info)
- cm_script_info = 'cm run script '
+ cm_script_info = i.get('script_call_prefix', '').strip()
+ if cm_script_info == '': cm_script_info = 'cm run script'
+ if not cm_script_info.endswith(' '): cm_script_info+=' '
x = '"'
y = ' '
@@ -682,6 +692,16 @@ def _run(self, i):
meta = script_artifact.meta
path = script_artifact.path
+ # Check path to repo
+ script_repo_path = script_artifact.repo_path
+
+ script_repo_path_with_prefix = script_artifact.repo_path
+ if script_artifact.repo_meta.get('prefix', '') != '':
+ script_repo_path_with_prefix = os.path.join(script_repo_path, script_artifact.repo_meta['prefix'])
+
+ env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path
+ env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix
+
# Check if has --help
if i.get('help',False):
return utils.call_internal_module(self, __file__, 'module_help', 'print_help', {'meta':meta, 'path':path})
@@ -1716,6 +1736,16 @@ def _run(self, i):
if verbose or show_time:
print (recursion_spaces+' - running time of script "{}": {:.2f} sec.'.format(','.join(found_script_tags), elapsed_time))
+
+ if not recursion and show_space:
+ stop_disk_stats = shutil.disk_usage("/")
+
+ used_disk_space_in_mb = int((start_disk_stats.free - stop_disk_stats.free) / (1024*1024))
+
+ if used_disk_space_in_mb > 0:
+ print (recursion_spaces+' - used disk space: {} MB'.format(used_disk_space_in_mb))
+
+
# Check if pause (useful if running a given script in a new terminal that may close automatically)
if i.get('pause', False):
print ('')
@@ -1725,13 +1755,17 @@ def _run(self, i):
print_env_at_the_end = meta.get('print_env_at_the_end',{})
if len(print_env_at_the_end)>0:
print ('')
- for p in print_env_at_the_end:
+
+ for p in sorted(print_env_at_the_end):
t = print_env_at_the_end[p]
+ if t == '': t = 'ENV[{}]'.format(p)
v = new_env.get(p, None)
print ('{}: {}'.format(t, str(v)))
+ print ('')
+
return rr
######################################################################################
@@ -2372,7 +2406,8 @@ def add(self, i):
parsed_artifact = i.get('parsed_artifact',[])
- artifact_obj = parsed_artifact[0] if len(parsed_artifact)>0 else ('','')
+ artifact_obj = parsed_artifact[0] if len(parsed_artifact)>0 else None
+ artifact_repo = parsed_artifact[1] if len(parsed_artifact)>1 else None
script_name = ''
if 'script_name' in i:
@@ -2473,6 +2508,13 @@ def add(self, i):
ii['automation']='script,5b4e0237da074764'
+ for k in ['parsed_automation', 'parsed_artifact']:
+ if k in ii: del ii[k]
+
+ if artifact_repo != None:
+ artifact = ii.get('artifact','')
+ ii['artifact'] = utils.assemble_cm_object2(artifact_repo) + ':' + artifact
+
r_obj=self.cmind.access(ii)
if r_obj['return']>0: return r_obj
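
A sketch of how the new `space` and `skip_write_test` inputs added above can be combined with the existing `time` flag via the `cmind` Python API (`detect,os` is only an illustrative tag set):

```python
import cmind

# Run a script, print its elapsed time and the disk space it used
# (the new "space" flag), and skip the current-directory write test.
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'detect,os',
                  'time': True,
                  'space': True,
                  'skip_write_test': True,
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```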
diff --git a/cm-mlops/automation/script/module_misc.py b/cm-mlops/automation/script/module_misc.py
index 6931232f3d..91b7873ae9 100644
--- a/cm-mlops/automation/script/module_misc.py
+++ b/cm-mlops/automation/script/module_misc.py
@@ -1122,7 +1122,7 @@ def update_path_for_docker(path, mounts, force_path_target=''):
path_target='/cm-mount'+path_target if force_path_target=='' else force_path_target
# If file, mount directory
- if os.path.isfile(path):
+ if os.path.isfile(path) or not os.path.isdir(path):
x = os.path.dirname(path_orig) + ':' + os.path.dirname(path_target)
else:
x = path_orig + ':' + path_target
@@ -1274,8 +1274,8 @@ def rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_ke
else:
run_cmd+=' --'+long_key+'='+q+str(v)+q
- return run_cmd
-
+ return run_cmd
+
run_cmd += rebuild_flags(i_run_cmd, fake_run, skip_input_for_fake_run, add_quotes_to_keys, '')
run_cmd = docker_run_cmd_prefix + ' && ' + run_cmd if docker_run_cmd_prefix!='' else run_cmd
@@ -1292,7 +1292,11 @@ def aux_search(i):
inp = i['input']
repos = inp.get('repos','')
- if repos == '': repos='internal,a4705959af8e447a'
+# Grigori Fursin commented this out on 20240412 because this line prevents
+# searching for scripts in other public or private repositories.
+# Not sure why we enforce just these 2 repositories.
+#
+# if repos == '': repos='internal,a4705959af8e447a'
parsed_artifact = inp.get('parsed_artifact',[])
@@ -1360,9 +1364,12 @@ def dockerfile(i):
cur_dir = os.getcwd()
+ quiet = i.get('quiet', False)
+
console = i.get('out') == 'con'
cm_repo = i.get('docker_cm_repo', 'mlcommons@ck')
+ cm_repo_flags = i.get('docker_cm_repo_flags', '')
# Search for script(s)
r = aux_search({'self_module': self_module, 'input': i})
@@ -1466,16 +1473,26 @@ def dockerfile(i):
docker_os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu'))
docker_os_version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04'))
+ docker_cm_repos = i.get('docker_cm_repos', docker_settings.get('cm_repos', ''))
+
+ docker_extra_sys_deps = i.get('docker_extra_sys_deps', '')
+
if not docker_base_image:
dockerfilename_suffix = docker_os +'_'+docker_os_version
else:
- dockerfilename_suffix = docker_base_image.split("/")
- dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1]
+ if os.name == 'nt':
+ dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-')
+ else:
+ dockerfilename_suffix = docker_base_image.split("/")
+ dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1]
fake_run_deps = i.get('fake_run_deps', docker_settings.get('fake_run_deps', False))
docker_run_final_cmds = docker_settings.get('docker_run_final_cmds', [])
- gh_token = i.get('docker_gh_token')
+ r = check_gh_token(i, docker_settings, quiet)
+ if r['return'] >0 : return r
+ gh_token = r['gh_token']
+ i['docker_gh_token'] = gh_token # To pass to docker function if needed
if i.get('docker_real_run', docker_settings.get('docker_real_run',False)):
fake_run_option = " "
@@ -1487,7 +1504,11 @@ def dockerfile(i):
env['CM_DOCKER_PRE_RUN_COMMANDS'] = docker_run_final_cmds
- dockerfile_path = os.path.join(script_path,'dockerfiles', dockerfilename_suffix +'.Dockerfile')
+ docker_path = i.get('docker_path', '').strip()
+ if docker_path == '':
+ docker_path = script_path
+
+ dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile')
if i.get('print_deps'):
cm_input = {'action': 'run',
@@ -1510,35 +1531,43 @@ def dockerfile(i):
comments = []
cm_docker_input = {'action': 'run',
- 'automation': 'script',
- 'tags': 'build,dockerfile',
- 'cm_repo': cm_repo,
- 'docker_base_image': docker_base_image,
- 'docker_os': docker_os,
- 'docker_os_version': docker_os_version,
- 'file_path': dockerfile_path,
- 'fake_run_option': fake_run_option,
- 'comments': comments,
- 'run_cmd': f'{run_cmd} --quiet',
- 'script_tags': f'{tag_string}',
- 'copy_files': docker_copy_files,
- 'quiet': True,
- 'env': env,
- 'dockerfile_env': dockerfile_env,
- 'v': i.get('v', False),
- 'fake_docker_deps': fake_run_deps,
- 'print_deps': True,
- 'real_run': True
- }
-
- if gh_token:
+ 'automation': 'script',
+ 'tags': 'build,dockerfile',
+ 'cm_repo': cm_repo,
+ 'cm_repo_flags': cm_repo_flags,
+ 'docker_base_image': docker_base_image,
+ 'docker_os': docker_os,
+ 'docker_os_version': docker_os_version,
+ 'file_path': dockerfile_path,
+ 'fake_run_option': fake_run_option,
+ 'comments': comments,
+ 'run_cmd': f'{run_cmd} --quiet',
+ 'script_tags': f'{tag_string}',
+ 'copy_files': docker_copy_files,
+ 'quiet': True,
+ 'env': env,
+ 'dockerfile_env': dockerfile_env,
+ 'v': i.get('v', False),
+ 'fake_docker_deps': fake_run_deps,
+ 'print_deps': True,
+ 'real_run': True
+ }
+
+ if docker_cm_repos != '':
+ cm_docker_input['cm_repos'] = docker_cm_repos
+
+ if gh_token != '':
cm_docker_input['gh_token'] = gh_token
+ if docker_extra_sys_deps != '':
+ cm_docker_input['extra_sys_deps'] = docker_extra_sys_deps
+
r = self_module.cmind.access(cm_docker_input)
if r['return'] > 0:
return r
- print("Dockerfile generated at "+dockerfile_path)
+ print ('')
+ print ("Dockerfile generated at " + dockerfile_path)
return {'return':0}
@@ -1567,13 +1596,9 @@ def docker(i):
(out) (str): if 'con', output to console
- parsed_artifact (list): prepared in CM CLI or CM access function
- [ (artifact alias, artifact UID) ] or
- [ (artifact alias, artifact UID), (artifact repo alias, artifact repo UID) ]
-
- (repos) (str): list of repositories to search for automations (internal & mlcommons@ck by default)
-
- (output_dir) (str): output directory (./ by default)
+ (docker_path) (str): where to create or find Dockerfile
+ (docker_gh_token) (str): GitHub token for private repositories
+ (docker_save_script) (str): if !='' name of script to save docker command
Returns:
(CM return dict):
@@ -1586,6 +1611,8 @@ def docker(i):
import copy
import re
+ quiet = i.get('quiet', False)
+
detached = i.get('docker_detached', '')
if detached=='':
detached = i.get('docker_dt', '')
@@ -1640,11 +1667,10 @@ def docker(i):
if 'CM_DOCKER_CACHE' not in env:
env['CM_DOCKER_CACHE'] = docker_cache
- image_repo = i.get('image_repo','')
+ image_repo = i.get('docker_image_repo','')
if image_repo == '':
image_repo = 'cknowledge'
-
for artifact in sorted(lst, key = lambda x: x.meta.get('alias','')):
meta = artifact.meta
@@ -1722,7 +1748,6 @@ def docker(i):
mounts.append(key)
# Updating environment variables from CM input based on input_mapping from meta
-
input_mapping = meta.get('input_mapping', {})
for c_input in input_mapping:
@@ -1789,26 +1814,41 @@ def docker(i):
#check for proxy settings and pass onto the docker
proxy_keys = [ "ftp_proxy", "FTP_PROXY", "http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY", "no_proxy", "NO_PROXY", "socks_proxy", "SOCKS_PROXY", "GH_TOKEN" ]
+
if env.get('+ CM_DOCKER_BUILD_ARGS', []) == []:
env['+ CM_DOCKER_BUILD_ARGS'] = []
+
for key in proxy_keys:
if os.environ.get(key, '') != '':
value = os.environ[key]
container_env_string += " --env.{}={} ".format(key, value)
env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format(key, value))
+ docker_use_host_group_id = i.get('docker_use_host_group_id', docker_settings.get('use_host_group_id'))
+ if docker_use_host_group_id and os.name != 'nt':
+ env['+ CM_DOCKER_BUILD_ARGS'].append("{}={}".format('CM_ADD_DOCKER_GROUP_ID', '\\"-g $(id -g $USER) -o\\"'))
+
docker_base_image = i.get('docker_base_image', docker_settings.get('base_image'))
docker_os = i.get('docker_os', docker_settings.get('docker_os', 'ubuntu'))
docker_os_version = i.get('docker_os_version', docker_settings.get('docker_os_version', '22.04'))
+
if not docker_base_image:
dockerfilename_suffix = docker_os +'_'+docker_os_version
else:
- dockerfilename_suffix = docker_base_image.split("/")
- dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1]
+ if os.name == 'nt':
+ dockerfilename_suffix = docker_base_image.replace('/', '-').replace(':','-')
+ else:
+ dockerfilename_suffix = docker_base_image.split("/")
+ dockerfilename_suffix = dockerfilename_suffix[len(dockerfilename_suffix) - 1]
+
cm_repo=i.get('docker_cm_repo', 'mlcommons@ck')
- dockerfile_path = os.path.join(script_path,'dockerfiles', dockerfilename_suffix +'.Dockerfile')
+ docker_path = i.get('docker_path', '').strip()
+ if docker_path == '':
+ docker_path = script_path
+
+ dockerfile_path = os.path.join(docker_path, 'dockerfiles', dockerfilename_suffix +'.Dockerfile')
docker_skip_run_cmd = i.get('docker_skip_run_cmd', docker_settings.get('skip_run_cmd', False)) #skips docker run cmd and gives an interactive shell to the user
@@ -1820,7 +1860,10 @@ def docker(i):
device = i.get('docker_device', docker_settings.get('device'))
- gh_token = i.get('docker_gh_token')
+ r = check_gh_token(i, docker_settings, quiet)
+ if r['return'] >0 : return r
+ gh_token = r['gh_token']
+
port_maps = i.get('docker_port_maps', docker_settings.get('port_maps', []))
@@ -1833,7 +1876,7 @@ def docker(i):
if interactive == '':
interactive = docker_settings.get('interactive', '')
-
+
# # Regenerate run_cmd
# if i.get('cmd'):
# run_cmd = "cm run script " + " ".join( a for a in i['cmd'] if not a.startswith('--docker_') )
@@ -1845,8 +1888,7 @@ def docker(i):
# run_cmd = ""
-
-
+
r = regenerate_script_cmd({'script_uid':script_uid,
'script_alias':script_alias,
'tags':tags,
@@ -1862,12 +1904,15 @@ def docker(i):
if docker_settings.get('mount_current_dir','')=='yes':
run_cmd = 'cd '+current_path_target+' && '+run_cmd
+ final_run_cmd = run_cmd if docker_skip_run_cmd not in [ 'yes', True, 'True' ] else 'cm version'
+
print ('')
print ('CM command line regenerated to be used inside Docker:')
print ('')
- print (run_cmd)
+ print (final_run_cmd)
print ('')
+
cm_docker_input = {'action': 'run',
'automation': 'script',
'tags': 'run,docker,container',
@@ -1884,7 +1929,7 @@ def docker(i):
# 'image_tag': script_alias,
'detached': detached,
'script_tags': f'{tag_string}',
- 'run_cmd': run_cmd if docker_skip_run_cmd not in [ 'yes', True, 'True' ] else 'echo "cm version"',
+ 'run_cmd': final_run_cmd,
'v': i.get('v', False),
'quiet': True,
'pre_run_cmds': docker_pre_run_cmds,
@@ -1902,7 +1947,7 @@ def docker(i):
if device:
cm_docker_input['device'] = device
- if gh_token:
+ if gh_token != '':
cm_docker_input['gh_token'] = gh_token
if port_maps:
@@ -1914,8 +1959,10 @@ def docker(i):
if extra_run_args != '':
cm_docker_input['extra_run_args'] = extra_run_args
- print ('')
+ if i.get('docker_save_script', ''):
+ cm_docker_input['save_script'] = i['docker_save_script']
+ print ('')
r = self_module.cmind.access(cm_docker_input)
if r['return'] > 0:
@@ -1923,3 +1970,21 @@ def docker(i):
return {'return':0}
+
+############################################################
+def check_gh_token(i, docker_settings, quiet):
+ gh_token = i.get('docker_gh_token', '')
+
+ if docker_settings.get('gh_token_required', False) and gh_token == '':
+ rx = {'return':1, 'error':'GH token is required but not provided. Use --docker_gh_token to set it'}
+
+ if quiet:
+ return rx
+
+ print ('')
+ gh_token = input ('Enter GitHub token to access private CM repositories required for this CM script: ')
+
+ if gh_token == '':
+ return rx
+
+ return {'return':0, 'gh_token': gh_token}
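
A sketch of driving the new Docker-related inputs from Python (tags and paths are illustrative; the target script is assumed to have a `docker` section in its meta). If that meta sets `gh_token_required`, `check_gh_token` above either uses `docker_gh_token` or prompts for it, unless `quiet` is set, in which case it returns an error:

```python
import cmind

# Generate dockerfiles/<suffix>.Dockerfile under docker_path instead of the
# script directory, tag the image under docker_image_repo, and save the
# final docker command to a script.
r = cmind.access({'action': 'docker',
                  'automation': 'script',
                  'tags': 'detect,os',              # illustrative tags
                  'docker_path': '/tmp/cm-docker',  # illustrative path
                  'docker_image_repo': 'cknowledge',
                  'docker_save_script': 'run-docker',
                  'quiet': True,
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```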
diff --git a/cm-mlops/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml b/cm-mlops/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
new file mode 100644
index 0000000000..50086d0862
--- /dev/null
+++ b/cm-mlops/cfg/benchmark-run-mlperf-inference-v4.0/_cm.yaml
@@ -0,0 +1,38 @@
+alias: benchmark-run-mlperf-inference-v4.0
+uid: b4ee9b6c820e493a
+
+automation_alias: cfg
+automation_uid: 88dce9c160324c5d
+
+tags:
+- benchmark
+- run
+- mlperf
+- inference
+- v4.0
+
+name: "MLPerf inference - v4.0"
+
+supported_compute:
+- ee8c568e0ac44f2b
+- fe379ecd1e054a00
+
+bench_uid: 39877bb63fb54725
+
+view_dimensions:
+- - input.device
+ - "MLPerf device"
+- - input.implementation
+ - "MLPerf implementation"
+- - input.backend
+ - "MLPerf backend"
+- - input.model
+ - "MLPerf model"
+- - input.scenario
+ - "MLPerf scenario"
+- - input.host_os
+ - "Host OS"
+- - output.state.cm-mlperf-inference-results-last.performance
+ - "Got performance"
+- - output.state.cm-mlperf-inference-results-last.accuracy
+ - "Got accuracy"
diff --git a/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md b/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
index 516e9b0695..130cebd5aa 100644
--- a/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
+++ b/cm-mlops/challenge/add-derived-metrics-to-mlperf-inference-v3.1/README.md
@@ -1,6 +1,6 @@
### Challenge
-Check past MLPerf inference results in [this MLCommons repository](https://github.com/mlcommons/ck_mlperf_results)
+Check past MLPerf inference results in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results)
and add derived metrics such as result/No of cores, power efficiency, device cost, operational costs, etc.
Add clock speed as a third dimension to graphs and improve Bar graph visualization.
@@ -30,6 +30,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/README.md
index 5e419d14f2..daac203cec 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.0-2023/README.md
@@ -29,7 +29,7 @@ Official results:
* https://github.com/mlcommons/inference_results_v3.0/tree/main/open/cTuning
Results in the MLCommons CK/CM format:
-* https://github.com/ctuning/ck_mlperf_results
+* https://github.com/mlcommons/cm4mlperf-results
Visualization and comparison with derived metrics:
* [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,v3.0).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-2023/README.md
index 02c1fef747..b365ac78d5 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-2023/README.md
@@ -82,6 +82,6 @@ with PRs from participants [here](https://github.com/ctuning/mlperf_inference_su
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
index 4baa492ebd..9169e478f7 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-amazon-inferentia-2023/README.md
@@ -31,6 +31,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
index b4b709ecd1..e0222c36e1 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-deepsparse/README.md
@@ -31,6 +31,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
index 70f1f34f1a..a136ea6f93 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-google-tpu-2023/README.md
@@ -32,6 +32,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
index c4d8636579..33a62d56e4 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-intel-2023/README.md
@@ -29,6 +29,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
index 7198895785..94c542d937 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-modular-mojo-2023/README.md
@@ -34,6 +34,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
index a91c819826..e4497e50d1 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-qualcomm-ai100-2023/README.md
@@ -33,6 +33,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
index fcec854ba3..852b9bd214 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-tvm-2023/README.md
@@ -41,6 +41,6 @@ This challenge is under preparation.
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
index 014ae17137..c0843a7801 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-universal-cpp-implementation-2023/README.md
@@ -31,6 +31,6 @@ Check [this ACM REP'23 keynote](https://doi.org/10.5281/zenodo.8105339) to learn
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
index 179961b5b6..cd79f8ba41 100644
--- a/cm-mlops/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
+++ b/cm-mlops/challenge/optimize-mlperf-inference-v3.1-windows-2023/README.md
@@ -36,6 +36,6 @@ Open ticket: [GitHub](https://github.com/mlcommons/ck/issues/696)
### Results
All accepted results will be publicly available in the CM format with derived metrics
-in this [MLCommons repository](https://github.com/mlcommons/ck_mlperf_results),
+in this [MLCommons repository](https://github.com/mlcommons/cm4mlperf-results),
in [MLCommons Collective Knowledge explorer](https://access.cknowledge.org/playground/?action=experiments)
and at official [MLCommons website](https://mlcommons.org).
diff --git a/cm-mlops/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md b/cm-mlops/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
index 18f17506a8..998c2df36a 100644
--- a/cm-mlops/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
+++ b/cm-mlops/challenge/reproduce-and-automate-tinymlperf-v1.1-2023/README.md
@@ -36,5 +36,5 @@ in mid June 2023.
### Results
-All results will be available in [this GitHub repo](https://github.com/ctuning/ck_mlperf_results)
+All results will be available in [this GitHub repo](https://github.com/ctuning/cm4mlperf-results)
and can be visualized and compared using the [MLCommons Collective Knowledge Playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny).
diff --git a/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml b/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml
index cad48a78b7..a2cd1994be 100644
--- a/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml
+++ b/cm-mlops/script/app-image-classification-onnx-py/_cm.yaml
@@ -67,6 +67,8 @@ deps:
variations:
cuda:
+ docker:
+ all_gpus: 'yes'
group: target
env:
USE_CUDA: yes
@@ -101,7 +103,6 @@ input_description:
docker:
skip_run_cmd: 'no'
- all_gpus: 'yes'
input_paths:
- input
- env.CM_IMAGE
diff --git a/cm-mlops/script/app-loadgen-generic-python/_cm.yaml b/cm-mlops/script/app-loadgen-generic-python/_cm.yaml
index 27468b32db..08b63927ff 100644
--- a/cm-mlops/script/app-loadgen-generic-python/_cm.yaml
+++ b/cm-mlops/script/app-loadgen-generic-python/_cm.yaml
@@ -204,6 +204,9 @@ variations:
CPUExecutionProvider
cuda:
+ docker:
+ all_gpus: 'yes'
+ base_image: nvcr.io/nvidia/pytorch:24.03-py3
group:
device
env:
@@ -296,14 +299,15 @@ input_description:
docker:
skip_run_cmd: 'no'
- all_gpus: 'yes'
input_paths:
- modelpath
+ - modelsamplepath
- env.CM_ML_MODEL_FILE_WITH_PATH
- env.CM_ML_MODEL_CODE_WITH_PATH
- output_dir
skip_input_for_fake_run:
- modelpath
+ - modelsamplepath
- env.CM_ML_MODEL_FILE_WITH_PATH
- env.CM_ML_MODEL_CODE_WITH_PATH
- output_dir
@@ -314,3 +318,5 @@ docker:
- interop
- execmode
- samples
+ - modelcfg.num_classes
+ - modelcfg.config
diff --git a/cm-mlops/script/app-loadgen-generic-python/customize.py b/cm-mlops/script/app-loadgen-generic-python/customize.py
index 9e15f45a08..c8810dcd7b 100644
--- a/cm-mlops/script/app-loadgen-generic-python/customize.py
+++ b/cm-mlops/script/app-loadgen-generic-python/customize.py
@@ -18,7 +18,7 @@ def preprocess(i):
if env.get('CM_MLPERF_BACKEND', '') != '':
run_opts +=" -b "+env['CM_MLPERF_BACKEND']
-
+
if env.get('CM_MLPERF_RUNNER', '') != '':
run_opts +=" -r "+env['CM_MLPERF_RUNNER']
diff --git a/cm-mlops/script/app-mlperf-inference-mlcommons-python/README.md b/cm-mlops/script/app-mlperf-inference-mlcommons-python/README.md
index 1704ba155f..ed11d669d9 100644
--- a/cm-mlops/script/app-mlperf-inference-mlcommons-python/README.md
+++ b/cm-mlops/script/app-mlperf-inference-mlcommons-python/README.md
@@ -540,6 +540,9 @@ ___
* get,generic-python-lib,_package.open_clip_torch
* CM names: `--adr.['open-clip']...`
- CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
+ * get,generic-python-lib,_package.opencv-python
+ * CM names: `--adr.['opencv-python']...`
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
* get,generic-python-lib,_package.scipy
* CM names: `--adr.['scipy']...`
- CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
diff --git a/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index 4a2347c4de..f2ca733fd9 100644
--- a/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/cm-mlops/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -867,6 +867,9 @@ variations:
- tags: get,generic-python-lib,_package.open_clip_torch
names:
- open-clip
+ - tags: get,generic-python-lib,_package.opencv-python
+ names:
+ - opencv-python
- tags: get,generic-python-lib,_package.scipy
names:
- scipy
diff --git a/cm-mlops/script/app-mlperf-inference/build_dockerfiles.py b/cm-mlops/script/app-mlperf-inference/build_dockerfiles.py
index e14a1794c3..10579d33ea 100644
--- a/cm-mlops/script/app-mlperf-inference/build_dockerfiles.py
+++ b/cm-mlops/script/app-mlperf-inference/build_dockerfiles.py
@@ -92,6 +92,7 @@
if r['return'] > 0:
print(r)
exit(1)
- print("Dockerfile generated at "+dockerfile_path)
+ print ('')
+ print ("Dockerfile generated at " + dockerfile_path)
diff --git a/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml b/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml
index dac863d325..306bebbb5f 100644
--- a/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml
+++ b/cm-mlops/script/app-stable-diffusion-onnx-py/_cm.yaml
@@ -72,6 +72,8 @@ deps:
variations:
cuda:
+ docker:
+ all_gpus: 'yes'
group: target
env:
USE_CUDA: yes
@@ -98,7 +100,6 @@ input_description:
docker:
skip_run_cmd: 'no'
- all_gpus: 'yes'
input_paths:
- output
add_quotes_to_keys:
diff --git a/cm-mlops/script/benchmark-object-detection-loadgen/README.md b/cm-mlops/script/benchmark-object-detection-loadgen/README.md
deleted file mode 100644
index f04cf7ae93..0000000000
--- a/cm-mlops/script/benchmark-object-detection-loadgen/README.md
+++ /dev/null
@@ -1,179 +0,0 @@
-
-Click here to see the table of contents.
-
-* [About](#about)
-* [Summary](#summary)
-* [Reuse this script in your project](#reuse-this-script-in-your-project)
- * [ Install CM automation language](#install-cm-automation-language)
- * [ Check CM script flags](#check-cm-script-flags)
- * [ Run this script from command line](#run-this-script-from-command-line)
- * [ Run this script from Python](#run-this-script-from-python)
- * [ Run this script via GUI](#run-this-script-via-gui)
- * [ Run this script via Docker (beta)](#run-this-script-via-docker-(beta))
-* [Customization](#customization)
- * [ Variations](#variations)
- * [ Default environment](#default-environment)
-* [Script workflow, dependencies and native scripts](#script-workflow-dependencies-and-native-scripts)
-* [Script output](#script-output)
-* [New environment keys (filter)](#new-environment-keys-(filter))
-* [New environment keys auto-detected from customize](#new-environment-keys-auto-detected-from-customize)
-* [Maintainers](#maintainers)
-
-
-
-*Note that this README is automatically generated - don't edit!*
-
-### About
-
-
-See extra [notes](README-extra.md) from the authors and contributors.
-
-#### Summary
-
-* Category: *Benchmark object detection (loadgen, python, ONNX).*
-* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/master/cm-mlops)*
-* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen)*
-* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
-* CM "database" tags to find this script: *benchmark,object-detection,loadgen*
-* Output cached? *False*
-___
-### Reuse this script in your project
-
-#### Install CM automation language
-
-* [Installation guide](https://github.com/mlcommons/ck/blob/master/docs/installation.md)
-* [CM intro](https://doi.org/10.5281/zenodo.8105339)
-
-#### Pull CM repository with this automation
-
-```cm pull repo mlcommons@ck```
-
-
-#### Run this script from command line
-
-1. `cm run script --tags=benchmark,object-detection,loadgen[,variations] `
-
-2. `cmr "benchmark object-detection loadgen[ variations]" `
-
-* `variations` can be seen [here](#variations)
-
-#### Run this script from Python
-
-
-Click here to expand this section.
-
-```python
-
-import cmind
-
-r = cmind.access({'action':'run'
- 'automation':'script',
- 'tags':'benchmark,object-detection,loadgen'
- 'out':'con',
- ...
- (other input keys for this script)
- ...
- })
-
-if r['return']>0:
- print (r['error'])
-
-```
-
-
-
-
-#### Run this script via GUI
-
-```cmr "cm gui" --script="benchmark,object-detection,loadgen"```
-
-Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=benchmark,object-detection,loadgen) to generate CM CMD.
-
-#### Run this script via Docker (beta)
-
-`cm docker script "benchmark object-detection loadgen[ variations]" `
-
-___
-### Customization
-
-
-#### Variations
-
- * *No group (any variation can be selected)*
-
- Click here to expand this section.
-
- * `_cpu`
- - Environment variables:
- - *USE_CPU*: `True`
- - Workflow:
- * `_cuda`
- - Environment variables:
- - *USE_CUDA*: `True`
- - Workflow:
-
-
-
-#### Default environment
-
-
-Click here to expand this section.
-
-These keys can be updated via `--env.KEY=VALUE` or `env` dictionary in `@input.json` or using script flags.
-
-
-
-
-___
-### Script workflow, dependencies and native scripts
-
-
-Click here to expand this section.
-
- 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/_cm.yaml)***
- * detect,os
- - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
- * get,sys-utils-cm
- - CM script: [get-sys-utils-cm](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-sys-utils-cm)
- * get,target,device
- - CM script: [get-target-device](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-target-device)
- * get,python3
- * CM names: `--adr.['python', 'python3']...`
- - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
- * get,cuda
- * `if (USE_CUDA == True)`
- * CM names: `--adr.['cuda']...`
- - CM script: [get-cuda](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-cuda)
- * get,mlperf,inference,loadgen
- * CM names: `--adr.['inference-src']...`
- - CM script: [get-mlperf-inference-loadgen](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-loadgen)
- * get,ml-model,object-detection
- * CM names: `--adr.['ml-model']...`
- - CM script: [get-ml-model-retinanet](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-ml-model-retinanet)
- * get,generic-python-lib,_onnxruntime
- * `if (USE_CUDA != True)`
- - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
- * get,generic-python-lib,_onnxruntime_gpu
- * `if (USE_CUDA == True)`
- - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
- 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/customize.py)***
- 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/_cm.yaml)
- 1. ***Run native script if exists***
- * [run.bat](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/run.bat)
- * [run.sh](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/run.sh)
- 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/_cm.yaml)
- 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/customize.py)***
- 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/benchmark-object-detection-loadgen/_cm.yaml)
-
-
-___
-### Script output
-`cmr "benchmark object-detection loadgen[,variations]" -j`
-#### New environment keys (filter)
-
-#### New environment keys auto-detected from customize
-
-___
-### Maintainers
-
-* [Open MLCommons taskforce on automation and reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
\ No newline at end of file
diff --git a/cm-mlops/script/benchmark-object-detection-loadgen/run.bat b/cm-mlops/script/benchmark-object-detection-loadgen/run.bat
deleted file mode 100644
index 50fb42790d..0000000000
--- a/cm-mlops/script/benchmark-object-detection-loadgen/run.bat
+++ /dev/null
@@ -1,7 +0,0 @@
-rem native script
-
-echo "TBD"
-
-
-%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\\python\\main.py
-IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cm-mlops/script/build-docker-image/customize.py b/cm-mlops/script/build-docker-image/customize.py
index c749b8f8d9..2cfab08594 100644
--- a/cm-mlops/script/build-docker-image/customize.py
+++ b/cm-mlops/script/build-docker-image/customize.py
@@ -7,9 +7,15 @@ def preprocess(i):
os_info = i['os_info']
env = i['env']
- dockerfile_dir = env.get('CM_DOCKERFILE_WITH_PATH')
- if dockerfile_dir and os.path.exists(dockerfile_dir):
- os.chdir(os.path.dirname(dockerfile_dir))
+ dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '')
+ if dockerfile_path!='' and os.path.exists(dockerfile_path):
+ build_dockerfile = False
+ env['CM_BUILD_DOCKERFILE'] = "no"
+ os.chdir(os.path.dirname(dockerfile_path))
+ else:
+ build_dockerfile = True
+ env['CM_BUILD_DOCKERFILE'] = "yes"
+
CM_DOCKER_BUILD_ARGS = env.get('+ CM_DOCKER_BUILD_ARGS', [])
@@ -23,11 +29,11 @@ def preprocess(i):
env['CM_DOCKER_BUILD_ARGS'] = build_args
- if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']):
- env['CM_BUILD_DOCKERFILE'] = "yes"
- else:
- env['CM_BUILD_DOCKERFILE'] = "no"
-
+# if 'CM_DOCKERFILE_WITH_PATH' not in env or not exists(env['CM_DOCKERFILE_WITH_PATH']):
+# env['CM_BUILD_DOCKERFILE'] = "yes"
+# else:
+# env['CM_BUILD_DOCKERFILE'] = "no"
+#
if "CM_DOCKER_IMAGE_REPO" not in env:
env['CM_DOCKER_IMAGE_REPO'] = "local"
@@ -45,4 +51,38 @@ def preprocess(i):
if env.get("CM_DOCKER_CACHE", "yes") == "no":
env["CM_DOCKER_CACHE_ARG"] = " --no-cache"
+ CMD = ''
+ if not build_dockerfile:
+ # Write .dockerignore
+ with open('.dockerignore', 'w') as f:
+ f.write('.git\n')
+
+ # Prepare CMD to build image
+ XCMD = [
+ 'docker build ' + env.get('CM_DOCKER_CACHE_ARG',''),
+ ' ' + build_args,
+ ' -f "' + dockerfile_path + '"',
+ ' -t "' + env.get('CM_DOCKER_IMAGE_REPO', '') + '/' + \
+ env.get('CM_DOCKER_IMAGE_NAME', '') + ':' + \
+ env.get('CM_DOCKER_IMAGE_TAG', '') + '"',
+ ' .'
+ ]
+
+ with open(dockerfile_path + '.build.sh', 'w') as f:
+ f.write(' \\\n'.join(XCMD) + '\n')
+
+ with open(dockerfile_path + '.build.bat', 'w') as f:
+ f.write(' ^\n'.join(XCMD) + '\n')
+
+ CMD = ''.join(XCMD)
+
+ print ('')
+ print ('CM generated the following Docker build command:')
+ print ('')
+ print (CMD)
+
+ print ('')
+
+ env['CM_DOCKER_BUILD_CMD'] = CMD
+
return {'return':0}
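
For reference, a small sketch (all values are placeholders) of the command assembled above: the same `XCMD` list is joined with backslash-newline continuations for the generated `.build.sh`/`.build.bat` helpers and with an empty separator for the single-line `CM_DOCKER_BUILD_CMD`:

```python
# Placeholder values standing in for the CM_DOCKER_* environment variables.
cache_arg = ' --no-cache'
build_args = '--build-arg CM_ADD_DOCKER_GROUP_ID=""'
dockerfile_path = '/tmp/cm-docker/dockerfiles/ubuntu_22.04.Dockerfile'
repo, name, tag = 'local', 'my-cm-script-image', 'latest'

XCMD = [
    'docker build ' + cache_arg,
    ' ' + build_args,
    ' -f "' + dockerfile_path + '"',
    ' -t "' + repo + '/' + name + ':' + tag + '"',
    ' .'
]

print(' \\\n'.join(XCMD))   # multi-line form written to <Dockerfile>.build.sh
print(''.join(XCMD))        # single-line form exported as CM_DOCKER_BUILD_CMD
```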
diff --git a/cm-mlops/script/build-docker-image/run.bat b/cm-mlops/script/build-docker-image/run.bat
index de5883877e..89a8f6f456 100644
--- a/cm-mlops/script/build-docker-image/run.bat
+++ b/cm-mlops/script/build-docker-image/run.bat
@@ -1,10 +1,12 @@
if exist %CM_DOCKERFILE_WITH_PATH% (
- echo .git > .dockerignore
+rem echo .git > .dockerignore
- echo.
- echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% .
+rem echo.
+rem echo docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f %CM_DOCKERFILE_WITH_PATH% -t %CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG% .
- echo.
- docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" .
+rem echo.
+rem docker build %CM_DOCKER_CACHE_ARG% %CM_DOCKER_BUILD_ARGS% -f "%CM_DOCKERFILE_WITH_PATH%" -t "%CM_DOCKER_IMAGE_REPO%/%CM_DOCKER_IMAGE_NAME%:%CM_DOCKER_IMAGE_TAG%" .
+
+ %CM_DOCKER_BUILD_CMD%
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
)
diff --git a/cm-mlops/script/build-docker-image/run.sh b/cm-mlops/script/build-docker-image/run.sh
index 29c4955b3f..a7e19e3189 100644
--- a/cm-mlops/script/build-docker-image/run.sh
+++ b/cm-mlops/script/build-docker-image/run.sh
@@ -1,11 +1,13 @@
#!/bin/bash
if [ -f "${CM_DOCKERFILE_WITH_PATH}" ]; then
- echo ".git" > .dockerignore
+# echo ".git" > .dockerignore
- echo ""
- echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ."
+# echo ""
+# echo "docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f ${CM_DOCKERFILE_WITH_PATH} -t ${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG} ."
- docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" .
+# docker build ${CM_DOCKER_CACHE_ARG} ${CM_DOCKER_BUILD_ARGS} -f "${CM_DOCKERFILE_WITH_PATH}" -t "${CM_DOCKER_IMAGE_REPO}/${CM_DOCKER_IMAGE_NAME}:${CM_DOCKER_IMAGE_TAG}" .
+
+ eval "${CM_DOCKER_BUILD_CMD}"
test $? -eq 0 || exit 1
fi
diff --git a/cm-mlops/script/build-dockerfile/README.md b/cm-mlops/script/build-dockerfile/README.md
index 5c7aa7eb7a..cf554c5add 100644
--- a/cm-mlops/script/build-dockerfile/README.md
+++ b/cm-mlops/script/build-dockerfile/README.md
@@ -112,11 +112,14 @@ ___
* `--build=value` → `CM_BUILD_DOCKER_IMAGE=value`
* `--cache=value` → `CM_DOCKER_CACHE=value`
* `--cm_repo=value` → `CM_MLOPS_REPO=value`
+* `--cm_repo_flags=value` → `CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO=value`
+* `--cm_repos=value` → `CM_DOCKER_EXTRA_CM_REPOS=value`
* `--comments=value` → `CM_DOCKER_RUN_COMMENTS=value`
* `--copy_files=value` → `CM_DOCKER_COPY_FILES=value`
* `--docker_base_image=value` → `CM_DOCKER_IMAGE_BASE=value`
* `--docker_os=value` → `CM_DOCKER_OS=value`
* `--docker_os_version=value` → `CM_DOCKER_OS_VERSION=value`
+* `--extra_sys_deps=value` → `CM_DOCKER_EXTRA_SYS_DEPS=value`
* `--fake_docker_deps=value` → `CM_DOCKER_FAKE_DEPS=value`
* `--fake_run_option=value` → `CM_DOCKER_FAKE_RUN_OPTION=value`
* `--file_path=value` → `CM_DOCKERFILE_WITH_PATH=value`
diff --git a/cm-mlops/script/build-dockerfile/_cm.json b/cm-mlops/script/build-dockerfile/_cm.json
index 3737cd40c6..0a438e8fd9 100644
--- a/cm-mlops/script/build-dockerfile/_cm.json
+++ b/cm-mlops/script/build-dockerfile/_cm.json
@@ -5,9 +5,12 @@
"category": "Docker automation",
"cache": false,
"input_mapping": {
+ "extra_sys_deps": "CM_DOCKER_EXTRA_SYS_DEPS",
"build": "CM_BUILD_DOCKER_IMAGE",
"cache": "CM_DOCKER_CACHE",
"cm_repo": "CM_MLOPS_REPO",
+ "cm_repo_flags": "CM_DOCKER_ADD_FLAG_TO_CM_MLOPS_REPO",
+ "cm_repos": "CM_DOCKER_EXTRA_CM_REPOS",
"docker_os": "CM_DOCKER_OS",
"docker_os_version": "CM_DOCKER_OS_VERSION",
"docker_base_image": "CM_DOCKER_IMAGE_BASE",
diff --git a/cm-mlops/script/build-dockerfile/customize.py b/cm-mlops/script/build-dockerfile/customize.py
index d223bb78e6..038f003384 100644
--- a/cm-mlops/script/build-dockerfile/customize.py
+++ b/cm-mlops/script/build-dockerfile/customize.py
@@ -17,6 +17,7 @@ def preprocess(i):
config = json.load(f)
build_args = []
+ build_args_default = {'CM_ADD_DOCKER_GROUP_ID':''}
input_args = []
copy_files = []
@@ -58,11 +59,12 @@ def preprocess(i):
if 'CM_DOCKERFILE_WITH_PATH' not in env:
env['CM_DOCKERFILE_WITH_PATH'] = os.path.join(os.getcwd(), "Dockerfile")
+
dockerfile_with_path = env['CM_DOCKERFILE_WITH_PATH']
dockerfile_dir = os.path.dirname(dockerfile_with_path)
extra_dir = os.path.dirname(dockerfile_with_path)
-
+
if extra_dir!='':
os.makedirs(extra_dir, exist_ok=True)
@@ -72,18 +74,25 @@ def preprocess(i):
# Maintainers
f.write(EOL)
- f.write('# Maintained by the MLCommons taskforce on automation and reproducibility' + EOL)
- f.write('LABEL github="https://github.com/mlcommons/ck"' + EOL)
- f.write('LABEL maintainer="https://cKnowledge.org/mlcommons-taskforce"' + EOL)
+ f.write('# Automatically generated by the CM workflow automation meta-framework' + EOL)
+ f.write('# https://github.com/mlcommons/ck' + EOL)
+ f.write(EOL)
+
+ f.write('LABEL github=""' + EOL)
+ f.write('LABEL maintainer=""' + EOL)
+ f.write('LABEL license=""' + EOL)
+
f.write(EOL)
image_label = get_value(env, config, 'LABEL', 'CM_DOCKER_IMAGE_LABEL')
if image_label:
f.write('LABEL ' + image_label + EOL)
+ f.write(EOL)
shell = get_value(env, config, 'SHELL', 'CM_DOCKER_IMAGE_SHELL')
if shell:
f.write('SHELL ' + shell + EOL)
+ f.write(EOL)
for arg in config['ARGS']:
f.write('ARG '+ arg + EOL)
@@ -91,6 +100,11 @@ def preprocess(i):
for build_arg in build_args:
f.write('ARG '+ build_arg + EOL)
+ for build_arg in sorted(build_args_default):
+ v = build_args_default[build_arg]
+ f.write('ARG '+ build_arg + '="' + str(v) + '"' + EOL)
+
+ f.write(EOL)
copy_cmds = []
if 'CM_DOCKER_COPY_FILES' in env:
import shutil
@@ -108,6 +122,9 @@ def preprocess(i):
f.write('RUN '+ get_value(env, config, 'package-manager-get-cmd') + " " + " ".join(get_value(env, config,
'packages')) + EOL)
+ if env.get('CM_DOCKER_EXTRA_SYS_DEPS', '')!='':
+ f.write('RUN ' + env['CM_DOCKER_EXTRA_SYS_DEPS'] + EOL)
+
if env['CM_DOCKER_OS'] == "ubuntu":
if int(env['CM_DOCKER_OS_VERSION'].split('.')[0]) >= 23:
if "--break-system-packages" not in env.get('CM_DOCKER_PIP_INSTALL_EXTRA_FLAGS', ''):
@@ -139,7 +156,7 @@ def preprocess(i):
DOCKER_GROUP_ID = "-g " + docker_groupid
else:
DOCKER_GROUP_ID = ""
- f.write('RUN groupadd ' + DOCKER_GROUP_ID + docker_group + EOL)
+ f.write('RUN groupadd ${CM_ADD_DOCKER_GROUP_ID} ' + DOCKER_GROUP_ID + docker_group + EOL)
if docker_userid:
DOCKER_USER_ID = "-u " + docker_userid
else:
@@ -171,6 +188,12 @@ def preprocess(i):
f.write('RUN cm pull repo ' + cm_mlops_repo + x + EOL)
+ # Check extra repositories
+ x = env.get('CM_DOCKER_EXTRA_CM_REPOS','')
+ if x!='':
+ for y in x.split(','):
+ f.write('RUN '+ y + EOL)
+
f.write(EOL+'# Install all system dependencies' + EOL)
f.write('RUN cm run script --tags=get,sys-utils-cm --quiet' + EOL)
@@ -234,7 +257,7 @@ def preprocess(i):
f.close()
- f = open(env['CM_DOCKERFILE_WITH_PATH'], "r")
+ #f = open(env['CM_DOCKERFILE_WITH_PATH'], "r")
#print(f.read())
return {'return':0}
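
A sketch (repository and package names are placeholders) of how the new `cm_repos` and `extra_sys_deps` inputs reach the generated Dockerfile: each comma-separated `cm_repos` entry becomes its own `RUN` line, and `extra_sys_deps` becomes a single `RUN` line after the system packages:

```python
import cmind

# Build a Dockerfile that pulls an extra CM repository and installs an
# extra system package (both values below are illustrative placeholders).
r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'build,dockerfile',
                  'cm_repos': 'cm pull repo my-org@my-cm-repo',
                  'extra_sys_deps': 'apt-get install -y libgl1',
                  'quiet': True,
                  'out': 'con'})

if r['return'] > 0:
    print(r['error'])
```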
diff --git a/cm-mlops/script/build-dockerfile/dockerinfo.json b/cm-mlops/script/build-dockerfile/dockerinfo.json
index 964063edba..d669ee068c 100644
--- a/cm-mlops/script/build-dockerfile/dockerinfo.json
+++ b/cm-mlops/script/build-dockerfile/dockerinfo.json
@@ -36,6 +36,9 @@
},
"22.04": {
"FROM": "ubuntu:22.04"
+ },
+ "23.04": {
+ "FROM": "ubuntu:23.04"
}
}
},
diff --git a/cm-mlops/script/generate-mlperf-tiny-submission/README.md b/cm-mlops/script/generate-mlperf-tiny-submission/README.md
index 65bdb79afe..7bf2c9aca3 100644
--- a/cm-mlops/script/generate-mlperf-tiny-submission/README.md
+++ b/cm-mlops/script/generate-mlperf-tiny-submission/README.md
@@ -145,7 +145,7 @@ ___
- CM script: [run-how-to-run-server](https://github.com/how-to-run/server/tree/master/script/run-how-to-run-server)
- CM script: [app-mlperf-inference-nvidia](https://github.com/cknowledge/cm-tests/tree/master/script/app-mlperf-inference-nvidia)
- CM script: [get-axs](https://github.com/cknowledge/cm-tests/tree/master/script/get-axs)
- - CM script: [process-mlperf-inference-results](https://github.com/mlcommons/ck_mlperf_results/tree/master/script/process-mlperf-inference-results)
+ - CM script: [process-mlperf-inference-results](https://github.com/mlcommons/cm4mlperf-results/tree/master/script/process-mlperf-inference-results)
- CM script: [activate-python-venv](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/activate-python-venv)
- CM script: [add-custom-nvidia-system](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/add-custom-nvidia-system)
- CM script: [app-image-classification-onnx-py](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-image-classification-onnx-py)
diff --git a/cm-mlops/script/get-aria2/README-extra.md b/cm-mlops/script/get-aria2/README-extra.md
new file mode 100644
index 0000000000..40539d77f2
--- /dev/null
+++ b/cm-mlops/script/get-aria2/README-extra.md
@@ -0,0 +1,9 @@
+# Some commands
+
+```bash
+cmr "get aria2" --version=1.37.0
+cmr "get aria2" --install
+cmr "get aria2" --path={path to the directory with aria2}
+cmr "get aria2" --input={full path to aria2}
+cmr "get aria2" --shell
+```
diff --git a/cm-mlops/script/get-aria2/_cm.yaml b/cm-mlops/script/get-aria2/_cm.yaml
new file mode 100644
index 0000000000..6fdd8bb17f
--- /dev/null
+++ b/cm-mlops/script/get-aria2/_cm.yaml
@@ -0,0 +1,37 @@
+alias: get-aria2
+uid: d83419a90a0c40d0
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: true
+
+category: Detection or installation of tools and artifacts
+
+input_mapping:
+ install: CM_FORCE_INSTALL
+ src: CM_ARIA2_BUILD_FROM_SRC
+
+deps:
+ - tags: detect,cpu
+ - tags: detect,os
+
+# called after the preprocess function in customize.py
+#prehook_deps:
+# - tags: print,native,hello-world
+
+env:
+ CM_REQUIRE_INSTALL: no
+ CM_ARIA2_DEFAULT_INSTALL_VERSION: "1.37.0"
+
+new_env_keys:
+ - CM_ARIA2_*
+ - +PATH
+
+print_env_at_the_end:
+ CM_ARIA2_INSTALLED_PATH: Path to the tool
+
+tags:
+- get
+- aria2
+- get-aria2
diff --git a/cm-mlops/script/get-aria2/customize.py b/cm-mlops/script/get-aria2/customize.py
new file mode 100644
index 0000000000..3c65bbe4f6
--- /dev/null
+++ b/cm-mlops/script/get-aria2/customize.py
@@ -0,0 +1,122 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+ # Pre-set by CM
+ os_info = i['os_info']
+ env = i['env']
+ recursion_spaces = i['recursion_spaces']
+ automation = i['automation']
+ run_script_input = i['run_script_input']
+
+ # Check if a given tool is already installed
+ file_name_core = 'aria2c'
+ file_name = file_name_core+'.exe' if os_info['platform'] == 'windows' else file_name_core
+
+ force_install = env.get('CM_FORCE_INSTALL', False) == True
+
+ if not force_install:
+ r = i['automation'].find_artifact({'file_name': file_name,
+ 'env':env,
+ 'os_info':os_info,
+ 'default_path_env_key': 'PATH',
+ 'detect_version':True,
+ 'env_path_key':'CM_ARIA2_BIN_WITH_PATH',
+ 'run_script_input':i['run_script_input'],
+ 'recursion_spaces':recursion_spaces})
+ if r['return'] >0 :
+ if r['return'] == 16:
+ # Not found, try install
+ force_install = True
+ else:
+ return r
+
+ # Force install
+ if force_install:
+ # Attempt to run installer
+ version = env.get('CM_VERSION','')
+ if version == '': version = env['CM_ARIA2_DEFAULT_INSTALL_VERSION']
+
+ if os_info['platform'] == 'windows':
+ archive = 'aria2-{}-win-64bit-build1'
+ ext = '.zip'
+ ext2 = ''
+ else:
+ archive = 'aria2-{}'
+ ext = '.tar.bz2'
+ ext2 = '.tar'
+
+ archive = archive.format(version)
+ archive_with_ext = archive+ext
+
+ env['CM_ARIA2_DOWNLOAD_DIR'] = archive
+
+ env['CM_ARIA2_DOWNLOAD_FILE'] = archive_with_ext
+ if ext2!='':
+ env['CM_ARIA2_DOWNLOAD_FILE2'] = archive+ext2
+
+ url = 'https://github.com/aria2/aria2/releases/download/release-{}/{}'.format(version, archive_with_ext)
+ env['CM_ARIA2_DOWNLOAD_URL'] = url
+
+ print ('URL to download ARIA2: {}'.format(url))
+
+ r = automation.run_native_script({'run_script_input':run_script_input, 'env':env, 'script_name':'install'})
+ if r['return']>0: return r
+
+ if os_info['platform'] == 'windows' or env.get('CM_ARIA2_BUILD_FROM_SRC', '').lower() == 'true':
+ install_path = os.path.join(os.getcwd(), archive)
+
+ path_to_file = os.path.join(install_path, file_name)
+ if not os.path.isfile(path_to_file):
+ return {'return':1, 'error':'file not found: {}'.format(path_to_file)}
+
+ env['CM_ARIA2_BIN_WITH_PATH'] = path_to_file
+ env['CM_ARIA2_INSTALLED_TO_CACHE'] = 'yes'
+ else:
+ path_to_bin = r['env_tmp'].get('CM_ARIA2_BIN_WITH_PATH','')
+ env['CM_ARIA2_BIN_WITH_PATH'] = path_to_bin
+
+ r = i['automation'].find_artifact({'file_name': file_name,
+ 'env':env,
+ 'os_info':os_info,
+ 'default_path_env_key': 'PATH',
+ 'detect_version':True,
+ 'env_path_key':'CM_ARIA2_BIN_WITH_PATH',
+ 'run_script_input':i['run_script_input'],
+ 'recursion_spaces':recursion_spaces})
+ if r['return']>0: return r
+
+ return {'return':0}
+
+def detect_version(i):
+ env = i['env']
+
+ r = i['automation'].parse_version({'match_text': r'aria2 version\s*([\d.]+)',
+ 'group_number': 1,
+ 'env_key':'CM_ARIA2_VERSION',
+ 'which_env':i['env']})
+ if r['return'] >0: return r
+
+ version = r['version']
+ print (i['recursion_spaces'] + ' Detected version: {}'.format(version))
+
+ return {'return':0, 'version':version}
+
+def postprocess(i):
+
+ env = i['env']
+ r = detect_version(i)
+ if r['return'] >0: return r
+
+ version = r['version']
+ found_file_path = env['CM_ARIA2_BIN_WITH_PATH']
+
+ found_path = os.path.dirname(found_file_path)
+
+ env['CM_ARIA2_INSTALLED_PATH'] = found_path
+
+ if env.get('CM_ARIA2_INSTALLED_TO_CACHE','')=='yes':
+ env['+PATH'] = [env['CM_ARIA2_INSTALLED_PATH']]
+
+ return {'return':0, 'version': version}
diff --git a/cm-mlops/script/get-aria2/install.bat b/cm-mlops/script/get-aria2/install.bat
new file mode 100644
index 0000000000..6255f0cafd
--- /dev/null
+++ b/cm-mlops/script/get-aria2/install.bat
@@ -0,0 +1,9 @@
+echo.
+
+del /Q /S %CM_ARIA2_DOWNLOAD_FILE%
+
+wget --no-check-certificate %CM_ARIA2_DOWNLOAD_URL%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+unzip -o -q %CM_ARIA2_DOWNLOAD_FILE%
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cm-mlops/script/get-aria2/install.sh b/cm-mlops/script/get-aria2/install.sh
new file mode 100644
index 0000000000..d9424732d0
--- /dev/null
+++ b/cm-mlops/script/get-aria2/install.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+echo ""
+
+if [[ "${CM_ARIA2_BUILD_FROM_SRC}" == "True" ]]; then
+
+ echo "Building from sources ..."
+ echo ""
+
+ rm -rf ${CM_ARIA2_DOWNLOAD_FILE}
+ rm -rf ${CM_ARIA2_DOWNLOAD_FILE2}
+
+ wget --no-check-certificate ${CM_ARIA2_DOWNLOAD_URL}
+ test $? -eq 0 || exit $?
+
+ bzip2 -d ${CM_ARIA2_DOWNLOAD_FILE}
+ test $? -eq 0 || exit $?
+
+ tar xvf ${CM_ARIA2_DOWNLOAD_FILE2}
+ test $? -eq 0 || exit $?
+
+ cd ${CM_ARIA2_DOWNLOAD_DIR}
+ test $? -eq 0 || exit $?
+
+ ./configure --prefix=$PWD/bin
+ test $? -eq 0 || exit $?
+
+ make
+ test $? -eq 0 || exit $?
+
+ make install
+ test $? -eq 0 || exit $?
+
+else
+ echo "Installing binary via sudo ..."
+ echo ""
+
+ cmd="sudo ${CM_HOST_OS_PACKAGE_MANAGER} install aria2"
+ echo "$cmd"
+
+ $cmd
+ test $? -eq 0 || exit $?
+
+ path_to_bin=`which aria2c`
+ echo "CM_ARIA2_BIN_WITH_PATH=$path_to_bin" > tmp-run-env.out
+
+fi
diff --git a/cm-mlops/script/get-aria2/run.bat b/cm-mlops/script/get-aria2/run.bat
new file mode 100644
index 0000000000..625b7edc03
--- /dev/null
+++ b/cm-mlops/script/get-aria2/run.bat
@@ -0,0 +1,4 @@
+rem Detect version
+
+%CM_ARIA2_BIN_WITH_PATH% --version > tmp-ver.out
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cm-mlops/script/get-aria2/run.sh b/cm-mlops/script/get-aria2/run.sh
new file mode 100644
index 0000000000..85ba9421a6
--- /dev/null
+++ b/cm-mlops/script/get-aria2/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# Detect version
+
+${CM_ARIA2_BIN_WITH_PATH} --version > tmp-ver.out
+test $? -eq 0 || exit 1
diff --git a/cm-mlops/script/get-croissant/README.md b/cm-mlops/script/get-croissant/README.md
new file mode 100644
index 0000000000..8a42f268a4
--- /dev/null
+++ b/cm-mlops/script/get-croissant/README.md
@@ -0,0 +1,128 @@
+Automatically generated README for this automation recipe: **get-croissant**
+
+Category: **AI/ML datasets**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=get-croissant,8fd653eac8da4c14) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *get,mlcommons,croissant*
+* Output cached? *True*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "get mlcommons croissant" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=get,mlcommons,croissant`
+
+`cm run script --tags=get,mlcommons,croissant `
+
+*or*
+
+`cmr "get mlcommons croissant"`
+
+`cmr "get mlcommons croissant " `
+
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'get,mlcommons,croissant',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+if r['return']>0:
+ print (r['error'])
+
+```
+
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="get,mlcommons,croissant"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=get,mlcommons,croissant) to generate CM CMD.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "get mlcommons croissant" `
+
+___
+### Customization
+
+#### Default environment
+
+
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
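+For example (the env key below is purely illustrative and is not defined by this script):
+
+```bash
+cm run script --tags=get,mlcommons,croissant --env.CM_TMP_EXAMPLE_KEY=1
+```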
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.json)***
+ * detect,os
+ - CM script: [detect-os](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/detect-os)
+ * get,python3
+ * CM names: `--adr.['python3', 'python']...`
+ - CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+ * git,repo,_repo.https://github.com/mlcommons/croissant
+ * CM names: `--adr.['git-mlcommons-croissant']...`
+ - CM script: [get-git-repo](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-git-repo)
+ 1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/customize.py)***
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.json)
+ 1. ***Run native script if exists***
+ * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/run.bat)
+ * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.json)
+ 1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/customize.py)***
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-croissant/_cm.json)
+
+___
+### Script output
+`cmr "get mlcommons croissant " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/cm-mlops/script/get-croissant/_cm.yaml b/cm-mlops/script/get-croissant/_cm.yaml
new file mode 100644
index 0000000000..a024189d24
--- /dev/null
+++ b/cm-mlops/script/get-croissant/_cm.yaml
@@ -0,0 +1,30 @@
+alias: get-croissant
+uid: 8fd653eac8da4c14
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: true
+
+category: AI/ML datasets
+
+deps:
+ - tags: detect,os
+
+ - names:
+ - python3
+ - python
+ tags: get,python3
+ version_min: '3.10'
+
+ - env:
+ CM_GIT_CHECKOUT_PATH_ENV_NAME: CM_MLCOMMONS_CROISSANT_PATH
+ extra_cache_tags: mlcommons,croissant
+ names:
+ - git-mlcommons-croissant
+ tags: git,repo,_repo.https://github.com/mlcommons/croissant
+
+tags:
+ - get
+ - mlcommons
+ - croissant
diff --git a/cm-mlops/script/get-croissant/customize.py b/cm-mlops/script/get-croissant/customize.py
new file mode 100644
index 0000000000..1ced8a4846
--- /dev/null
+++ b/cm-mlops/script/get-croissant/customize.py
@@ -0,0 +1,16 @@
+from cmind import utils
+import os
+
+def preprocess(i):
+
+ os_info = i['os_info']
+
+ return {'return':0}
+
+def postprocess(i):
+
+ os_info = i['os_info']
+ env = i['env']
+
+
+ return {'return':0}
diff --git a/cm-mlops/script/get-croissant/run.bat b/cm-mlops/script/get-croissant/run.bat
new file mode 100644
index 0000000000..3177de9f60
--- /dev/null
+++ b/cm-mlops/script/get-croissant/run.bat
@@ -0,0 +1,20 @@
+@echo off
+
+echo =======================================================
+
+cd %CM_MLCOMMONS_CROISSANT_PATH%\python\mlcroissant
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+echo Running %CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
+
+%CM_PYTHON_BIN_WITH_PATH% -m pip install -e .[git]
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo.
+echo Validating Croissant ...
+
+mlcroissant validate --file ../../datasets/titanic/metadata.json
+IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
+
+echo =======================================================
diff --git a/cm-mlops/script/get-croissant/run.sh b/cm-mlops/script/get-croissant/run.sh
new file mode 100644
index 0000000000..dd2c67bb27
--- /dev/null
+++ b/cm-mlops/script/get-croissant/run.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+echo "======================================================="
+
+cd ${CM_MLCOMMONS_CROISSANT_PATH}/python/mlcroissant
+if [ "${?}" != "0" ]; then exit 1; fi
+
+echo ""
+echo "Running ${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]"
+
+${CM_PYTHON_BIN_WITH_PATH} -m pip install -e .[git]
+if [ "${?}" != "0" ]; then exit 1; fi
+
+echo ""
+echo "Validating Croissant ..."
+
+mlcroissant validate --file ../../datasets/titanic/metadata.json
+if [ "${?}" != "0" ]; then exit 1; fi
+
+echo "======================================================="
+
diff --git a/cm-mlops/script/get-cuda/_cm.json b/cm-mlops/script/get-cuda/_cm.json
index 7d5d171010..fa5dac580e 100644
--- a/cm-mlops/script/get-cuda/_cm.json
+++ b/cm-mlops/script/get-cuda/_cm.json
@@ -119,5 +119,12 @@
},
"group": "installation-mode"
}
+ },
+ "print_env_at_the_end" : {
+ "CM_CUDA_PATH_LIB_CUDNN_EXISTS": "",
+ "CM_CUDA_VERSION": "",
+ "CM_CUDA_VERSION_STRING": "",
+ "CM_NVCC_BIN_WITH_PATH": "",
+ "CUDA_HOME": ""
}
}
diff --git a/cm-mlops/script/get-dataset-coco2014/README.md b/cm-mlops/script/get-dataset-coco2014/README.md
index a6f6b84bc0..0e84cca95c 100644
--- a/cm-mlops/script/get-dataset-coco2014/README.md
+++ b/cm-mlops/script/get-dataset-coco2014/README.md
@@ -14,7 +14,7 @@ Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](htt
* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014)*
-* CM meta description for this script: *[_cm.json](_cm.json)*
+* CM meta description for this script: *[_cm.yaml](_cm.yaml)*
* All CM tags to find and reuse this script (see in above meta description): *get,dataset,coco2014,object-detection,original*
* Output cached? *True*
* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
@@ -115,7 +115,6 @@ ___
* `_calibration`
- Environment variables:
- - *CM_CALIBRATION_DATASET_WGET_URL*: `https://github.com/mlcommons/inference/blob/master/calibration/openimages/openimages_cal_images_list.txt`
- *CM_DATASET_CALIBRATION*: `yes`
- Workflow:
* **`_validation`** (default)
@@ -168,24 +167,26 @@ ___
### Dependencies on other CM scripts
- 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.json)***
+ 1. ***Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)***
* get,python3
* CM names: `--adr.['python', 'python3']...`
- CM script: [get-python3](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-python3)
+ * get,generic-python-lib,_package.tqdm
+ - CM script: [get-generic-python-lib](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-generic-python-lib)
* mlperf,inference,source
* CM names: `--adr.['inference-src']...`
- CM script: [get-mlperf-inference-src](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/get-mlperf-inference-src)
1. ***Run "preprocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/customize.py)***
- 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.json)
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)
1. ***Run native script if exists***
* [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/run.bat)
* [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/run.sh)
- 1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.json)***
+ 1. ***Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)***
* get,coco2014,annotations
* `if (CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS == yes)`
- *Warning: no scripts found*
1. ***Run "postrocess" function from [customize.py](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/customize.py)***
- 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.json)
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/get-dataset-coco2014/_cm.yaml)
___
### Script output
diff --git a/cm-mlops/script/get-dataset-coco2014/_cm.json b/cm-mlops/script/get-dataset-coco2014/_cm.json
deleted file mode 100644
index a90bf9149a..0000000000
--- a/cm-mlops/script/get-dataset-coco2014/_cm.json
+++ /dev/null
@@ -1,112 +0,0 @@
-{
- "alias": "get-dataset-coco2014",
- "automation_alias": "script",
- "automation_uid": "5b4e0237da074764",
- "cache": true,
- "category": "AI/ML datasets",
- "category_sort": 8500,
- "default_env": {
- "CM_DATASET_CALIBRATION": "no"
- },
- "deps": [
- {
- "names": [
- "python",
- "python3"
- ],
- "tags": "get,python3"
- },
- {
- "force_env_keys": [
- "CM_GIT_*"
- ],
- "names": [
- "inference-src"
- ],
- "tags": "mlperf,inference,source",
- "version": "master"
- }
- ],
- "env": {
- "CM_DATASET": "COCO2014"
- },
- "new_env_keys": [
- "CM_DATASET_PATH",
- "CM_DATASET_PATH_ROOT",
- "CM_DATASET_ANNOTATIONS_DIR_PATH",
- "CM_DATASET_ANNOTATIONS_FILE_PATH",
- "CM_CALIBRATION_DATASET_PATH"
- ],
- "posthook_deps": [
- {
- "enable_if_env": {
- "CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS": [
- "yes"
- ]
- },
- "tags": "get,coco2014,annotations"
- }
- ],
- "tags": [
- "get",
- "dataset",
- "coco2014",
- "object-detection",
- "original"
- ],
- "uid": "3f7ad9d42f4040f8",
- "variations": {
- "50": {
- "default": true,
- "env": {
- "CM_DATASET_SIZE": "50"
- },
- "group": "size"
- },
- "500": {
- "env": {
- "CM_DATASET_SIZE": "500"
- },
- "group": "size"
- },
- "calibration": {
- "env": {
- "CM_CALIBRATION_DATASET_WGET_URL": "https://github.com/mlcommons/inference/blob/master/calibration/openimages/openimages_cal_images_list.txt",
- "CM_DATASET_CALIBRATION": "yes"
- },
- "group": "dataset-type"
- },
- "custom-annotations": {
- "env": {
- "CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS": "yes"
- },
- "group": "annotations"
- },
- "default-annotations": {
- "default": true,
- "env": {
- "CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS": "no"
- },
- "group": "annotations"
- },
- "full": {
- "env": {
- "CM_DATASET_SIZE": ""
- },
- "group": "size"
- },
- "size.#": {
- "env": {
- "CM_DATASET_SIZE": "#"
- },
- "group": "size"
- },
- "validation": {
- "default": true,
- "env": {
- "CM_DATASET_CALIBRATION": "no"
- },
- "group": "dataset-type"
- }
- }
-}
diff --git a/cm-mlops/script/get-dataset-coco2014/_cm.yaml b/cm-mlops/script/get-dataset-coco2014/_cm.yaml
new file mode 100644
index 0000000000..b6017a2e8c
--- /dev/null
+++ b/cm-mlops/script/get-dataset-coco2014/_cm.yaml
@@ -0,0 +1,89 @@
+alias: get-dataset-coco2014
+uid: 3f7ad9d42f4040f8
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+cache: true
+
+category: AI/ML datasets
+category_sort: 8500
+
+tags:
+- get
+- dataset
+- coco2014
+- object-detection
+- original
+
+default_env:
+ CM_DATASET_CALIBRATION: 'no'
+
+deps:
+
+- names:
+ - python
+ - python3
+ tags: get,python3
+
+- tags: get,generic-python-lib,_package.tqdm
+
+- force_env_keys:
+ - CM_GIT_*
+ names:
+ - inference-src
+ tags: mlperf,inference,source
+ version: master
+
+env:
+ CM_DATASET: COCO2014
+
+new_env_keys:
+- CM_DATASET_PATH
+- CM_DATASET_PATH_ROOT
+- CM_DATASET_ANNOTATIONS_DIR_PATH
+- CM_DATASET_ANNOTATIONS_FILE_PATH
+- CM_CALIBRATION_DATASET_PATH
+
+posthook_deps:
+- enable_if_env:
+ CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS:
+ - 'yes'
+ tags: get,coco2014,annotations
+
+variations:
+ '50':
+ default: true
+ env:
+ CM_DATASET_SIZE: '50'
+ group: size
+ '500':
+ env:
+ CM_DATASET_SIZE: '500'
+ group: size
+ calibration:
+ env:
+ CM_DATASET_CALIBRATION: 'yes'
+ group: dataset-type
+ custom-annotations:
+ env:
+ CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'yes'
+ group: annotations
+ default-annotations:
+ default: true
+ env:
+ CM_DATASET_COCO2014_CUSTOM_ANNOTATIONS: 'no'
+ group: annotations
+ full:
+ env:
+ CM_DATASET_SIZE: ''
+ group: size
+ size.#:
+ env:
+ CM_DATASET_SIZE: '#'
+ group: size
+ validation:
+ default: true
+ env:
+ CM_DATASET_CALIBRATION: 'no'
+ group: dataset-type
diff --git a/cm-mlops/script/get-dataset-coco2014/run.bat b/cm-mlops/script/get-dataset-coco2014/run.bat
index 5f4c9183f3..9ac62e6ad8 100644
--- a/cm-mlops/script/get-dataset-coco2014/run.bat
+++ b/cm-mlops/script/get-dataset-coco2014/run.bat
@@ -15,5 +15,7 @@ if not "%CM_DATASET_SIZE%" == "" (
set MAX_IMAGES=
)
+rem TBD - next file doesn't exist in the latest inference - need to check/fix ...
+
%CM_PYTHON_BIN% download-coco-2014.py %MAX_IMAGES% --dataset-dir=%INSTALL_DIR% --output-labels=openimages-mlperf.json
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
diff --git a/cm-mlops/script/get-generic-python-lib/_cm.json b/cm-mlops/script/get-generic-python-lib/_cm.json
index b691e85b3c..fa84d62fa5 100644
--- a/cm-mlops/script/get-generic-python-lib/_cm.json
+++ b/cm-mlops/script/get-generic-python-lib/_cm.json
@@ -965,7 +965,7 @@
},
"torchvision_cuda": {
"default_env": {
- "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS": "torchvision"
+ "CM_GENERIC_PYTHON_PIP_UNINSTALL_DEPS1": "torchvision"
},
"deps": [
{
diff --git a/cm-mlops/script/get-generic-python-lib/customize.py b/cm-mlops/script/get-generic-python-lib/customize.py
index a6e9590f65..b15e2d9e00 100644
--- a/cm-mlops/script/get-generic-python-lib/customize.py
+++ b/cm-mlops/script/get-generic-python-lib/customize.py
@@ -60,7 +60,7 @@ def preprocess(i):
# Check if upgrade
if force_install:
extra+=' --upgrade --no-deps --force-reinstall'
-
+
# Check index URL
index_url = env.get('CM_GENERIC_PYTHON_PIP_INDEX_URL','').strip()
if index_url != '':
diff --git a/cm-mlops/script/get-git-repo/customize.py b/cm-mlops/script/get-git-repo/customize.py
index d8eefbf43f..d1e92f7040 100644
--- a/cm-mlops/script/get-git-repo/customize.py
+++ b/cm-mlops/script/get-git-repo/customize.py
@@ -15,6 +15,9 @@ def preprocess(i):
env_key = get_env_key(env)
+ cm_git_url = env['CM_GIT_URL']
+
+
if 'CM_GIT_REPO_NAME' not in env:
update_env(env, 'CM_GIT_REPO{}_NAME', env_key, os.path.basename(env['CM_GIT_URL']))
diff --git a/cm-mlops/script/get-mlperf-inference-src/README.md b/cm-mlops/script/get-mlperf-inference-src/README.md
index c88c8a0499..6d854cedcf 100644
--- a/cm-mlops/script/get-mlperf-inference-src/README.md
+++ b/cm-mlops/script/get-mlperf-inference-src/README.md
@@ -109,7 +109,7 @@ ___
- Environment variables:
- *CM_GIT_CHECKOUT*: `deepsparse`
- *CM_GIT_URL*: `https://github.com/neuralmagic/inference`
- - *CM_MLPERF_LAST_RELEASE*: `v3.0`
+ - *CM_MLPERF_LAST_RELEASE*: `v4.0`
- Workflow:
* `_gn`
- Environment variables:
diff --git a/cm-mlops/script/get-mlperf-inference-src/_cm.json b/cm-mlops/script/get-mlperf-inference-src/_cm.json
index 3b7b4ea839..4e4c4806d6 100644
--- a/cm-mlops/script/get-mlperf-inference-src/_cm.json
+++ b/cm-mlops/script/get-mlperf-inference-src/_cm.json
@@ -104,7 +104,7 @@
"env": {
"CM_GIT_CHECKOUT": "deepsparse",
"CM_GIT_URL": "https://github.com/neuralmagic/inference",
- "CM_MLPERF_LAST_RELEASE": "v3.0"
+ "CM_MLPERF_LAST_RELEASE": "v4.0"
}
},
"full-history": {
@@ -201,7 +201,7 @@
"env": {
"CM_GIT_CHECKOUT": "deepsparse",
"CM_GIT_URL": "https://github.com/neuralmagic/inference",
- "CM_MLPERF_LAST_RELEASE": "v3.1"
+ "CM_MLPERF_LAST_RELEASE": "v4.0"
}
},
"main": {
diff --git a/cm-mlops/script/import-mlperf-inference-to-experiment/README-extra.md b/cm-mlops/script/import-mlperf-inference-to-experiment/README-extra.md
index 43f19c3756..64b604d16c 100644
--- a/cm-mlops/script/import-mlperf-inference-to-experiment/README-extra.md
+++ b/cm-mlops/script/import-mlperf-inference-to-experiment/README-extra.md
@@ -9,7 +9,7 @@ and link reproducibility reports as shown in these examples:
* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions)
* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a)
-Aggreaged results are available in [this MLCommons repository](https://github.com/mlcommons/ck_mlperf_results).
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
You can see these results at [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-inference,all).
@@ -24,15 +24,15 @@ Pull the MLCommons CK repository with automation recipes for interoperable MLOps
cm pull repo mlcommons@ck
```
-Pull already imported results (v2.0, v2.1, v3.0, v3.1) from this [mlcommons@ck_mlperf_results repo](https://github.com/mlcommons/ck_mlperf_results):
+Pull already imported results (v2.0, v2.1, v3.0, v3.1) from this [mlcommons@cm4mlperf-results repo](https://github.com/mlcommons/cm4mlperf-results):
```bash
-cm pull repo mlcommons@ck_mlperf_results
+cm pull repo mlcommons@cm4mlperf-results --checkout=dev
```
Install repository with raw MLPerf inference benchmark results with {NEW VERSION}:
```bash
-cmr "get git repo _repo.https://github.com/mlcommons/inference_results_v{NEW VERSION}" --extra_cache_tags=mlperf-inference-results,version-{NEW VERSION}
+cmr "get git repo _repo.https://github.com/mlcommons/inference_results_v{NEW VERSION}" --extra_cache_tags=mlperf-inference-results,version-{NEW VERSION} --time --space
```
@@ -40,31 +40,32 @@ Use the following CM command if you want to analyze private MLPerf results under
(you need to be a submitter or collaborate with cTuning.org and cKnowledge.org to have an access to such repository):
```bash
-cm run script "get git repo _repo.https://github.com/mlcommons/submissions_inference_v4.0" --env.CM_GIT_CHECKOUT=main --extra_cache_tags=mlperf-inference-results,version-4.0-private
+cm run script "get git repo _repo.https://github.com/mlcommons/submissions_inference_v4.0" --env.CM_GIT_CHECKOUT=main --extra_cache_tags=mlperf-inference-results,version-4.0-private --time --space
```
-Convert all raw MLPerf results into CM experiment entries (it can take 5..15 minutes to run submission checker with raw MLPerf results before converting them to the fast CM format):
+Convert all raw MLPerf results into CM experiment entries - it can take 5..15 minutes to run the submission checker
+on the raw MLPerf results before converting them to the fast CM format (skip `--target_repo` if you want
+to record results to the `local` CM repository):
+
```bash
-cm run script "import mlperf inference to-experiment"
+cm run script "import mlperf inference to-experiment" --target_repo=mlcommons@cm4mlperf-results --time --space
```
+
or for a specific submitter:
+
```bash
cm run script "import mlperf inference to-experiment" --submitter=CTuning
```
If you already generated `summary.csv` in your current directory, you can skip submission checker as follows:
-```bash
-cm run script "import mlperf inference to-experiment _skip_checker"
-```
-
-Import to a specific repo:
```bash
-cm run script "import mlperf inference to-experiment" --target_repo=mlcommons@ck_mlperf_results
+cm run script "import mlperf inference to-experiment _skip_checker"
```
Visualize results on your local machine via CK playground GUI:
+
```bash
cm run script "gui _playground"
```
@@ -73,7 +74,7 @@ These results are also available in the [public CK playground](https://access.ck
## Further analysis of results
-Please check this [README](https://github.com/mlcommons/ck_mlperf_results#how-to-update-this-repository-with-new-results).
+Please check this [README](https://github.com/mlcommons/cm4mlperf-results#how-to-update-this-repository-with-new-results).
# Contact us
diff --git a/cm-mlops/script/import-mlperf-tiny-to-experiment/README-extra.md b/cm-mlops/script/import-mlperf-tiny-to-experiment/README-extra.md
index 3e13b627f3..105e7ea4a6 100644
--- a/cm-mlops/script/import-mlperf-tiny-to-experiment/README-extra.md
+++ b/cm-mlops/script/import-mlperf-tiny-to-experiment/README-extra.md
@@ -9,7 +9,7 @@ and link reproducibility reports as shown in these examples:
* [Power efficiency to compare Qualcomm, Nvidia and Sima.ai devices](https://cKnowledge.org/mlcommons-mlperf-inference-gui-derived-metrics-and-conditions)
* [Reproducibility report for Nvidia Orin](https://access.cknowledge.org/playground/?action=experiments&name=mlperf-inference--v3.0--edge--closed--image-classification--offline&result_uid=3751b230c800434a)
-Aggreaged results are available in [this MLCommons repository](https://github.com/mlcommons/ck_mlperf_results).
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
 You can see these results at the [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-tiny,all).
diff --git a/cm-mlops/script/import-mlperf-training-to-experiment/README-extra.md b/cm-mlops/script/import-mlperf-training-to-experiment/README-extra.md
index 9a4c9f8d1f..abfc764233 100644
--- a/cm-mlops/script/import-mlperf-training-to-experiment/README-extra.md
+++ b/cm-mlops/script/import-mlperf-training-to-experiment/README-extra.md
@@ -7,7 +7,7 @@ The goal is to make it easier for the community to analyze MLPerf results,
add derived metrics such as performance/Watt and constraints,
and link reproducibility reports.
-Aggreaged results are available in [this MLCommons repository](https://github.com/mlcommons/ck_mlperf_results).
+Aggregated results are available in [this MLCommons repository](https://github.com/mlcommons/cm4mlperf-results).
You can see these results at [MLCommons CK playground](https://access.cknowledge.org/playground/?action=experiments&tags=mlperf-training,all).
diff --git a/cm-mlops/script/prune-docker/README.md b/cm-mlops/script/prune-docker/README.md
new file mode 100644
index 0000000000..496e0b87d3
--- /dev/null
+++ b/cm-mlops/script/prune-docker/README.md
@@ -0,0 +1,120 @@
+Automatically generated README for this automation recipe: **prune-docker**
+
+Category: **Docker automation**
+
+License: **Apache 2.0**
+
+Maintainers: [Public MLCommons Task Force on Automation and Reproducibility](https://github.com/mlcommons/ck/blob/master/docs/taskforce.md)
+
+---
+*[ [Online info and GUI to run this CM script](https://access.cknowledge.org/playground/?action=scripts&name=prune-docker,27ead88809bb4d4e) ]*
+
+---
+#### Summary
+
+* CM GitHub repository: *[mlcommons@ck](https://github.com/mlcommons/ck/tree/dev/cm-mlops)*
+* GitHub directory for this script: *[GitHub](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker)*
+* CM meta description for this script: *[_cm.json](_cm.json)*
+* All CM tags to find and reuse this script (see in above meta description): *prune,docker*
+* Output cached? *False*
+* See [pipeline of dependencies](#dependencies-on-other-cm-scripts) on other CM scripts
+
+
+---
+### Reuse this script in your project
+
+#### Install MLCommons CM automation meta-framework
+
+* [Install CM](https://access.cknowledge.org/playground/?action=install)
+* [CM Getting Started Guide](https://github.com/mlcommons/ck/blob/master/docs/getting-started.md)
+
+#### Pull CM repository with this automation recipe (CM script)
+
+```cm pull repo mlcommons@ck```
+
+#### Print CM help from the command line
+
+````cmr "prune docker" --help````
+
+#### Customize and run this script from the command line with different variations and flags
+
+`cm run script --tags=prune,docker`
+
+`cm run script --tags=prune,docker `
+
+*or*
+
+`cmr "prune docker"`
+
+`cmr "prune docker " `
+
+
+#### Run this script from Python
+
+
+Click here to expand this section.
+
+```python
+
+import cmind
+
+r = cmind.access({'action':'run',
+ 'automation':'script',
+ 'tags':'prune,docker',
+ 'out':'con',
+ ...
+ (other input keys for this script)
+ ...
+ })
+
+if r['return']>0:
+ print (r['error'])
+
+```
+
+
+
+
+#### Run this script via GUI
+
+```cmr "cm gui" --script="prune,docker"```
+
+Use this [online GUI](https://cKnowledge.org/cm-gui/?tags=prune,docker) to generate CM CMD.
+
+#### Run this script via Docker (beta)
+
+`cm docker script "prune docker" `
+
+___
+### Customization
+
+#### Default environment
+
+
+Click here to expand this section.
+
+These keys can be updated via `--env.KEY=VALUE`, the `env` dictionary in `@input.json`, or script flags.
+
+
+
+
+___
+### Dependencies on other CM scripts
+
+
+ 1. Read "deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json)
+ 1. Run "preprocess" function from customize.py
+ 1. Read "prehook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json)
+ 1. ***Run native script if exists***
+ * [run.bat](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/run.bat)
+ * [run.sh](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/run.sh)
+ 1. Read "posthook_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json)
+ 1. Run "postrocess" function from customize.py
+ 1. Read "post_deps" on other CM scripts from [meta](https://github.com/mlcommons/ck/tree/dev/cm-mlops/script/prune-docker/_cm.json)
+
+___
+### Script output
+`cmr "prune docker " -j`
+#### New environment keys (filter)
+
+#### New environment keys auto-detected from customize
diff --git a/cm-mlops/script/prune-docker/_cm.json b/cm-mlops/script/prune-docker/_cm.json
new file mode 100644
index 0000000000..c7f9cfcb31
--- /dev/null
+++ b/cm-mlops/script/prune-docker/_cm.json
@@ -0,0 +1,11 @@
+{
+ "alias": "prune-docker",
+ "automation_alias": "script",
+ "automation_uid": "5b4e0237da074764",
+ "category": "Docker automation",
+ "tags": [
+ "prune",
+ "docker"
+ ],
+ "uid": "27ead88809bb4d4e"
+}
diff --git a/cm-mlops/script/prune-docker/run.bat b/cm-mlops/script/prune-docker/run.bat
new file mode 100644
index 0000000000..980baad8ec
--- /dev/null
+++ b/cm-mlops/script/prune-docker/run.bat
@@ -0,0 +1 @@
+docker system prune -a --volumes
diff --git a/cm-mlops/script/prune-docker/run.sh b/cm-mlops/script/prune-docker/run.sh
new file mode 100644
index 0000000000..eb849e376a
--- /dev/null
+++ b/cm-mlops/script/prune-docker/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker system prune -a --volumes
diff --git a/cm-mlops/script/run-docker-container/README.md b/cm-mlops/script/run-docker-container/README.md
index 38f09df0c6..4c23be33eb 100644
--- a/cm-mlops/script/run-docker-container/README.md
+++ b/cm-mlops/script/run-docker-container/README.md
@@ -119,6 +119,7 @@ ___
* `--recreate=value` → `CM_DOCKER_IMAGE_RECREATE=value`
* `--run_cmd=value` → `CM_DOCKER_RUN_CMD=value`
* `--run_cmd_extra=value` → `CM_DOCKER_RUN_CMD_EXTRA=value`
+* `--save_script=value` → `CM_DOCKER_SAVE_SCRIPT=value`
* `--script_tags=value` → `CM_DOCKER_RUN_SCRIPT_TAGS=value`
* `--shm_size=value` → `CM_DOCKER_SHM_SIZE=value`
diff --git a/cm-mlops/script/run-docker-container/_cm.json b/cm-mlops/script/run-docker-container/_cm.json
index 60fba4cf6d..30d490bf5b 100644
--- a/cm-mlops/script/run-docker-container/_cm.json
+++ b/cm-mlops/script/run-docker-container/_cm.json
@@ -36,7 +36,8 @@
"extra_run_args": "CM_DOCKER_EXTRA_RUN_ARGS",
"device": "CM_DOCKER_ADD_DEVICE",
"cache": "CM_DOCKER_CACHE",
- "all_gpus": "CM_DOCKER_ADD_ALL_GPUS"
+ "all_gpus": "CM_DOCKER_ADD_ALL_GPUS",
+ "save_script": "CM_DOCKER_SAVE_SCRIPT"
},
"prehook_deps": [
{
diff --git a/cm-mlops/script/run-docker-container/customize.py b/cm-mlops/script/run-docker-container/customize.py
index 8348ce0ce6..f01a6e997b 100644
--- a/cm-mlops/script/run-docker-container/customize.py
+++ b/cm-mlops/script/run-docker-container/customize.py
@@ -10,6 +10,7 @@ def preprocess(i):
env = i['env']
+
interactive = env.get('CM_DOCKER_INTERACTIVE_MODE','')
if interactive:
@@ -43,18 +44,28 @@ def preprocess(i):
DOCKER_CONTAINER = docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag
CMD = "docker images -q " + DOCKER_CONTAINER
+
if os_info['platform'] == 'windows':
CMD += " 2> nul"
else:
CMD += " 2> /dev/null"
- docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8")
+ print ('')
+ print ('Checking Docker images:')
+ print (CMD)
+ print ('')
+
+ try:
+ docker_image = subprocess.check_output(CMD, shell=True).decode("utf-8")
+ except Exception as e:
+ return {'return':1, 'error':'Docker is either not installed or not started:\n{}'.format(e)}
recreate_image = env.get('CM_DOCKER_IMAGE_RECREATE', '')
if docker_image and recreate_image != "yes":
print("Docker image exists with ID: " + docker_image)
env['CM_DOCKER_IMAGE_EXISTS'] = "yes"
+
elif recreate_image == "yes":
env['CM_DOCKER_IMAGE_RECREATE'] = "no"
@@ -66,6 +77,7 @@ def postprocess(i):
env = i['env']
+
# Updating Docker info
update_docker_info(env)
@@ -157,10 +169,13 @@ def postprocess(i):
print ('')
print ("Container launch command:")
+ print ('')
print (CMD)
print ('')
print ("Running "+run_cmd+" inside docker container")
+ record_script({'cmd':CMD, 'env': env})
+
print ('')
docker_out = subprocess.check_output(CMD, shell=True).decode("utf-8")
@@ -176,20 +191,49 @@ def postprocess(i):
if env.get('CM_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]:
x1 = '-it'
x2 = " && bash "
-
- CONTAINER="docker run {} --entrypoint ".format(x1) + x + x + " " + run_opts + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag
+
+ CONTAINER="docker run " + x1 + " --entrypoint " + x + x + " " + run_opts + " " + docker_image_repo + "/" + docker_image_name + ":" + docker_image_tag
CMD = CONTAINER + " bash -c " + x + run_cmd + x2 + x
print ('')
print ("Container launch command:")
+ print ('')
print (CMD)
+ record_script({'cmd':CMD, 'env': env})
+
print ('')
docker_out = os.system(CMD)
return {'return':0}
+def record_script(i):
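+ # Save the final container launch command to helper scripts next to the generated
+ # Dockerfile (<Dockerfile>.run.bat / .run.sh) and, if requested, to CM_DOCKER_SAVE_SCRIPT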
+
+ cmd = i['cmd']
+ env = i['env']
+
+ files = []
+
+ dockerfile_path = env.get('CM_DOCKERFILE_WITH_PATH', '')
+ if dockerfile_path != '' and os.path.isfile(dockerfile_path):
+ files.append(dockerfile_path + '.run.bat')
+ files.append(dockerfile_path + '.run.sh')
+
+ save_script = env.get('CM_DOCKER_SAVE_SCRIPT', '')
+ if save_script != '':
+ if save_script.endswith('.bat') or save_script.endswith('.sh'):
+ files.append(save_script)
+ else:
+ files.append(save_script+'.bat')
+ files.append(save_script+'.sh')
+
+ for filename in files:
+ with open (filename, 'w') as f:
+ f.write(cmd + '\n')
+
+ return {'return':0}
+
def update_docker_info(env):
# Updating Docker info
docker_image_repo = env.get('CM_DOCKER_IMAGE_REPO', 'cknowledge')
diff --git a/cm-mlops/script/set-venv/README-extra.md b/cm-mlops/script/set-venv/README-extra.md
new file mode 100644
index 0000000000..987ad1f67b
--- /dev/null
+++ b/cm-mlops/script/set-venv/README-extra.md
@@ -0,0 +1,6 @@
+# Examples
+
+```bash
+cmr "set venv" mlperf-test
+cmr "set venv" mlperf-test2 --python=/usr/bin/python3
+```
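+
+On Linux, `customize.py` creates the virtual environment in `./<NAME>` with a `work`
+sub-directory and writes a `venv-<NAME>.sh` helper that activates it, so a typical session
+(using the first example above) looks like:
+
+```bash
+cmr "set venv" mlperf-test
+source venv-mlperf-test.sh
+```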
diff --git a/cm-mlops/script/set-venv/_cm.yaml b/cm-mlops/script/set-venv/_cm.yaml
new file mode 100644
index 0000000000..40b08b9f19
--- /dev/null
+++ b/cm-mlops/script/set-venv/_cm.yaml
@@ -0,0 +1,14 @@
+alias: set-venv
+uid: 07163dd7d6cd4026
+
+automation_alias: script
+automation_uid: 5b4e0237da074764
+
+input_mapping:
+ python: CM_SET_VENV_PYTHON
+
+cache: false
+
+tags:
+- set
+- venv
diff --git a/cm-mlops/script/set-venv/customize.py b/cm-mlops/script/set-venv/customize.py
new file mode 100644
index 0000000000..a8517a366e
--- /dev/null
+++ b/cm-mlops/script/set-venv/customize.py
@@ -0,0 +1,96 @@
+from cmind import utils
+import os
+
+def preprocess(i):
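+ # Create (if needed) a Python virtual environment named from CM_NAME or the first
+ # CM artifact (falling back to "default"), add a work/ sub-directory and generate
+ # a venv-<name> activation script in the current directory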
+
+ os_info = i['os_info']
+
+ env = i['env']
+
+ meta = i['meta']
+
+ automation = i['automation']
+
+ quiet = (env.get('CM_QUIET', False) == 'yes')
+
+ ############################################################
+ cur_dir = os.getcwd()
+
+ name = env.get('CM_NAME', '')
+ if name == '':
+ artifacts = i.get('input', {}).get('artifacts', [])
+ if len(artifacts)>0:
+ name = artifacts[0]
+ if name == '':
+ name = 'default'
+
+ if os_info['platform'] == 'windows':
+ activate_script = os.path.join('Scripts', 'activate.bat')
+ else:
+ activate_script = os.path.join('bin', 'activate')
+
+ activate_script2 = os.path.join(name, activate_script)
+
+ if not os.path.isfile(activate_script2):
+ force_python_path = env.get('CM_SET_VENV_PYTHON','')
+
+ if force_python_path != '' and not os.path.isfile(force_python_path):
+ return {'return':1, 'error':'python executable not found: {}'.format(force_python_path)}
+
+ if os_info['platform'] == 'windows':
+ python_path = 'python.exe' if force_python_path == '' else force_python_path
+ create_dir = ' & md {}\\work'
+ else:
+ python_path = 'python3' if force_python_path == '' else force_python_path
+ create_dir = ' ; mkdir {}/work'
+
+ cmd = python_path + ' -m venv ' + name + create_dir.format(name)
+
+ print ('====================================================================')
+
+ print ('Creating venv: "{}" ...'.format(cmd))
+ os.system(cmd)
+
+
+ if os.path.isfile(activate_script2):
+ script_file = 'venv-'+name
+ if os_info['platform'] == 'windows':
+ script_file += '.bat'
+ xcmd = script_file
+ else:
+ script_file += '.sh'
+ xcmd = 'source '+script_file
+
+ if not os.path.isfile(script_file):
+
+ work_dir = os.path.join(name, 'work')
+ if not os.path.isdir(work_dir):
+ os.makedirs(work_dir)
+
+ if os_info['platform'] == 'windows':
+ shell = os.environ.get('CM_SET_VENV_SHELL', '')
+ if shell == '':
+ shell = env.get('CM_SET_VENV_SHELL', '')
+ if shell != '':
+ shell = shell.replace('CM_SET_VENV_WORK', 'work')
+ if shell == '': shell = 'cmd'
+ cmd = 'cd {} & call {} & set CM_REPOS=%CD%\\{}\\CM & {}\n'.format(name, activate_script, name, shell)
+ else:
+ cmd = '#!/bin/bash\n\ncd {} ; source {} ; export CM_REPOS=$PWD/CM ; cd work\n'.format(name, activate_script)
+
+ with open(script_file, 'w') as f:
+ f.write(cmd)
+
+ print ('====================================================================')
+ print ('Please run the following command:')
+ print ('')
+ print (xcmd)
+ print ('====================================================================')
+
+ return {'return':0}
+
+def postprocess(i):
+
+ env = i['env']
+
+ return {'return':0}
diff --git a/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml b/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml
index 60ec1d160b..c1961ba300 100644
--- a/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml
+++ b/cm-mlops/script/test-download-and-extract-artifacts/_cm.yaml
@@ -1,8 +1,13 @@
alias: test-download-and-extract-artifacts
+uid: 51dde7580b404b27
+
automation_alias: script
automation_uid: 5b4e0237da074764
+
cache: false
+
category: Tests
+
deps:
- tags: download,file,_url.https://zenodo.org/record/4735647/files/resnet50_v1.onnx
env:
@@ -17,9 +22,10 @@ deps:
# CM_DOWNLOAD_CHECKSUM:
force_cache: true
extra_cache_tags: reproduce,paper,artifact,zenodo,xyz2
+
new_env_keys:
- CM_REPRODUCE_PAPER_XYZ*
+
tags:
- test
- download-and-extract-artifacts
-uid: 51dde7580b404b27
diff --git a/cm/CHANGES.md b/cm/CHANGES.md
index cd45f122bc..78497bf1be 100644
--- a/cm/CHANGES.md
+++ b/cm/CHANGES.md
@@ -1,3 +1,9 @@
+## V2.0.4.1
+ - changed outdated version of CM in requirements when creating new repos
+ - fixed minor bug in `cm add automation {name}`
+ - added dependency on giturlparse to support private repos in containers
+ - fixed bug when adding automation in the local repository: "cm add . {automation_name}"
+
## V2.0.4
- added skip of delayed help to simplify output of `cmr [tags] --help`
- revisited automatically generated READMEs for CM scripts (automation recipes)
diff --git a/cm/cmind/__init__.py b/cm/cmind/__init__.py
index d0ec817816..ea3331a269 100644
--- a/cm/cmind/__init__.py
+++ b/cm/cmind/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.0.4"
+__version__ = "2.0.4.1"
from cmind.core import access
from cmind.core import error
diff --git a/cm/cmind/config.py b/cm/cmind/config.py
index bb8086996b..f4a7c80809 100644
--- a/cm/cmind/config.py
+++ b/cm/cmind/config.py
@@ -60,7 +60,7 @@ def __init__(self, config_file = None):
"cp":"copy"
},
- "new_repo_requirements": "cmind >= 0.7.5\n",
+ "new_repo_requirements": "cmind >= 2.0.4\n",
"cmind_automation":"automation",
diff --git a/cm/cmind/repo/automation/automation/module.py b/cm/cmind/repo/automation/automation/module.py
index 0c6e478915..450bef7a75 100644
--- a/cm/cmind/repo/automation/automation/module.py
+++ b/cm/cmind/repo/automation/automation/module.py
@@ -81,6 +81,10 @@ def add(self, i):
if 'tags' in i: del(i['tags'])
+ automation = i['automation']
+ if automation!='.' and ',' not in automation:
+ i['automation'] = automation + ',' + self.meta['uid']
+
r_obj=self.cmind.access(i)
if r_obj['return']>0: return r_obj
diff --git a/cm/cmind/repo/automation/ck/README.md b/cm/cmind/repo/automation/ckx/README.md
similarity index 100%
rename from cm/cmind/repo/automation/ck/README.md
rename to cm/cmind/repo/automation/ckx/README.md
diff --git a/cm/cmind/repo/automation/ck/_cm.json b/cm/cmind/repo/automation/ckx/_cm.json
similarity index 91%
rename from cm/cmind/repo/automation/ck/_cm.json
rename to cm/cmind/repo/automation/ckx/_cm.json
index 59549e28d0..aa6d89c6eb 100644
--- a/cm/cmind/repo/automation/ck/_cm.json
+++ b/cm/cmind/repo/automation/ckx/_cm.json
@@ -1,5 +1,5 @@
{
- "alias": "ck",
+ "alias": "ckx",
"automation_alias": "automation",
"automation_uid": "bbeb15d8f0a944a4",
"desc": "Accessing legacy CK automations",
@@ -7,7 +7,7 @@
"sort": -1000,
"tags": [
"automation",
- "ck"
+ "ckx"
],
"uid": "1818c39eaf3a4a78",
"use_any_action": true
diff --git a/cm/cmind/repo/automation/ck/module.py b/cm/cmind/repo/automation/ckx/module.py
similarity index 100%
rename from cm/cmind/repo/automation/ck/module.py
rename to cm/cmind/repo/automation/ckx/module.py
diff --git a/cm/requirements.txt b/cm/requirements.txt
index 3fbe3fb400..859f84a20b 100644
--- a/cm/requirements.txt
+++ b/cm/requirements.txt
@@ -1,3 +1,4 @@
pyyaml
requests
setuptools
+giturlparse
diff --git a/docs/mlperf/inference/README.md b/docs/mlperf/inference/README.md
index 0f7ad79dcd..f3cacaa2ec 100644
--- a/docs/mlperf/inference/README.md
+++ b/docs/mlperf/inference/README.md
@@ -236,7 +236,7 @@ You can pull all past MLPerf results in the CM format, import your current exper
with derived metrics on your system using the Collective Knowledge Playground as follows:
```bash
-cm pull repo mlcommons@ck_mlperf_results
+cm pull repo mlcommons@cm4mlperf-results
cmr "get git repo _repo.https://github.com/ctuning/mlperf_inference_submissions_v3.1" \
--env.CM_GIT_CHECKOUT=main \
--extra_cache_tags=mlperf-inference-results,community,version-3.1
diff --git a/tests/script/test_docker.py b/tests/script/test_docker.py
index 266a5b17cd..5a02c932ed 100644
--- a/tests/script/test_docker.py
+++ b/tests/script/test_docker.py
@@ -9,6 +9,7 @@
'add_deps_recursive': {
'compiler': {'tags': "gcc"}
},
+ 'image_name':'cm-script-app-image-classification-onnx-py',
'env': {
'CM_DOCKER_RUN_SCRIPT_TAGS': 'app,image-classification,onnx,python',
'CM_MLOPS_REPO': 'ctuning@mlcommons-ck',