From 9afe484bce6407ecadf4c2cca2fb57048cffce36 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Mon, 16 Sep 2024 19:46:46 +0530 Subject: [PATCH 01/16] CM_TMP_CURRENT_SCRIPT_PATH made accessible while updating dynamic env --- automation/script/module.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index fbb400f89e..d0723971d4 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -744,6 +744,8 @@ def _run(self, i): meta = script_artifact.meta path = script_artifact.path + env['CM_TMP_CURRENT_SCRIPT_PATH'] = path + # Check min CM version requirement min_cm_version = meta.get('min_cm_version','').strip() if min_cm_version != '': @@ -1329,8 +1331,6 @@ def _run(self, i): if "add_deps_recursive" in versions_meta: self._merge_dicts_with_tags(add_deps_recursive, versions_meta['add_deps_recursive']) - env['CM_TMP_CURRENT_SCRIPT_PATH'] = path - # Run chain of docker dependencies if current run cmd is from inside a docker container docker_deps = [] if i.get('docker_run_deps'): From 31635dd53da4e939ae0b59596189b04e34cec710 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Tue, 17 Sep 2024 12:04:11 +0530 Subject: [PATCH 02/16] fixed rclone cmd bug --- script/download-file/customize.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/script/download-file/customize.py b/script/download-file/customize.py index c8834f1ce6..3a1921ebf0 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -178,9 +178,9 @@ def preprocess(i): if env["CM_HOST_OS_TYPE"] == "windows": url = url.replace("%", "%%") temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace("%", "%%") - env['CM_DOWNLOAD_CMD'] = f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P)" + env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P)" else: - env['CM_DOWNLOAD_CMD'] = f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P" + env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P)" filename = env['CM_DOWNLOAD_FILENAME'] env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename From 29054cb1cb59f7da8e68c4d7119899313e326cc1 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Tue, 17 Sep 2024 22:34:02 +0530 Subject: [PATCH 03/16] Included abstract class for updating env variables --- automation/script/module.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index d0723971d4..1ff65805ed 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -744,8 +744,6 @@ def _run(self, i): meta = script_artifact.meta path = script_artifact.path - env['CM_TMP_CURRENT_SCRIPT_PATH'] = path - # Check min CM version requirement min_cm_version = meta.get('min_cm_version','').strip() if min_cm_version != '': @@ -1331,6 +1329,9 @@ def _run(self, i): if "add_deps_recursive" in versions_meta: 
self._merge_dicts_with_tags(add_deps_recursive, versions_meta['add_deps_recursive']) + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path) + if r['return']>0: return r + # Run chain of docker dependencies if current run cmd is from inside a docker container docker_deps = [] if i.get('docker_run_deps'): @@ -4359,6 +4360,20 @@ def any_enable_or_skip_script(meta, env): return False +############################################################################################################ +def _update_env(env, key=None, value=None): + if key == None or value == None: + return {'return': 1, 'error': 'None value not expected in key and value arguments in _update_env.'} + if not isinstance(key, str): + return {'return': 1, 'error': 'String value expected inside key argument.'} + + env[key] = value + + r = update_env_with_values(env) + if r['return']>0: return r + + return {'return': 0} + ############################################################################################################ def update_env_with_values(env, fail_on_not_found=False, extra_env={}): """ From fd7aff8d62971c7fac25bf5ab2413bf1c059bec0 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Tue, 17 Sep 2024 23:15:57 +0530 Subject: [PATCH 04/16] modified env variable updates to go through the abstract function --- automation/script/module.py | 72 +++++++++++++++++++++++++------------ 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index 2b0b84b703..9094b18a95 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -347,19 +347,26 @@ def _run(self, i): fake_run = i.get('fake_run', False) fake_run = i.get('fake_run', False) if 'fake_run' in i else i.get('prepare', False) - if fake_run: env['CM_TMP_FAKE_RUN']='yes' + if fake_run: + r = _update_env(env, 'CM_TMP_FAKE_RUN', 'yes') + if r['return']>0: return r debug_uid = i.get('debug_uid', '') if debug_uid!='': - env['CM_TMP_DEBUG_UID'] = debug_uid + r = _update_env(env, 'CM_TMP_DEBUG_UID', debug_uid) + if r['return']>0: return r fake_deps = i.get('fake_deps', False) - if fake_deps: env['CM_TMP_FAKE_DEPS']='yes' + if fake_deps: + r = _update_env(env, 'CM_TMP_FAKE_DEPS', 'yes') + if r['return']>0: return r if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: - env['CM_SKIP_SYS_UTILS']='yes' + r = _update_env(env, 'CM_SKIP_SYS_UTILS', 'yes') + if r['return']>0: return r if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: - env['CM_TMP_SKIP_SUDO']='yes' + r = _update_env(env, 'CM_TMP_SKIP_SUDO', 'yes') + if r['return']>0: return r run_state = i.get('run_state', self.run_state) if not run_state.get('version_info', []): @@ -380,16 +387,18 @@ def _run(self, i): if silent: if 'verbose' in i: del(i['verbose']) if 'v' in i: del(i['v']) - env['CM_TMP_SILENT']='yes' + r = _update_env(env, 'CM_TMP_SILENT', 'yes') + if r['return']>0: return r run_state['tmp_silent']=True if 'verbose' in i: verbose=i['verbose'] elif 'v' in i: verbose=i['v'] if verbose: - env['CM_VERBOSE']='yes' - run_state['tmp_verbose']=True - logging.getLogger().setLevel(logging.DEBUG) + r = _update_env(env, 'CM_VERBOSE', 'yes') + if r['return']>0: return r + run_state['tmp_verbose']=True + logging.getLogger().setLevel(logging.DEBUG) print_deps = i.get('print_deps', False) @@ -418,15 +427,20 @@ def _run(self, i): # Detect current path and record in env for further use in native scripts current_path = os.path.abspath(os.getcwd()) - env['CM_TMP_CURRENT_PATH'] = 
current_path + r = _update_env(env, 'CM_TMP_CURRENT_PATH', current_path) + if r['return']>0: return r # Check if quiet mode quiet = i.get('quiet', False) if 'quiet' in i else (env.get('CM_QUIET','').lower() == 'yes') - if quiet: env['CM_QUIET'] = 'yes' + if quiet: + r = _update_env(env, 'CM_QUIET', 'yes') + if r['return']>0: return r skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \ else (env.get('CM_SKIP_REMEMBERED_SELECTIONS','').lower() == 'yes') - if skip_remembered_selections: env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes' + if skip_remembered_selections: + r = _update_env(env, 'CM_SKIP_REMEMBERED_SELECTIONS', 'yes') + if r['return']>0: return r # Prepare debug info parsed_script = i.get('parsed_artifact') @@ -464,13 +478,15 @@ def _run(self, i): for key in self.input_flags_converted_to_tmp_env: value = i.get(key, '').strip() if value != '': - env['CM_TMP_' + key.upper()] = value + r = _update_env(env, 'CM_TMP_' + key.upper(), value) + if r['return']>0: return r for key in self.input_flags_converted_to_env: value = i.get(key, '') if type(value)==str: value=value.strip() if value != '': - env['CM_' + key.upper()] = value + r = _update_env(env, 'CM_' + key.upper(), value) + if r['return']>0: return r ############################################################################################################ @@ -760,8 +776,11 @@ def _run(self, i): if script_artifact.repo_meta.get('prefix', '') != '': script_repo_path_with_prefix = os.path.join(script_repo_path, script_artifact.repo_meta['prefix']) - env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path - env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_REPO_PATH', script_repo_path) + if r['return']>0: return r + + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX', script_repo_path_with_prefix) + if r['return']>0: return r # Check if has --help if i.get('help',False): @@ -972,7 +991,8 @@ def _run(self, i): elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: logging.info(recursion_spaces+' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) fake_run = True - env['CM_TMP_FAKE_RUN']='yes' + r = _update_env(env, 'CM_TMP_FAKE_RUN', 'yes') + if r['return']>0: return r @@ -1263,7 +1283,8 @@ def _run(self, i): found_cached = False remove_tmp_tag = True - env['CM_RENEW_CACHE_ENTRY']='yes' + r = _update_env(env, 'CM_RENEW_CACHE_ENTRY', 'yes') + if r['return']>0: return r # Prepare files to be cleaned clean_files = [self.tmp_file_run_state, @@ -1317,7 +1338,8 @@ def _run(self, i): logging.debug(recursion_spaces+' - Version is not specified - use either default_version from meta or min/max/usable: {}'.format(version)) - env['CM_VERSION'] = version + r = _update_env(env, 'CM_VERSION', version) + if r['return']>0: return r if 'version-'+version not in cached_tags: cached_tags.append('version-'+version) @@ -1445,7 +1467,8 @@ def _run(self, i): elif pip_version_max != '': pip_version_string = '<='+pip_version_max - env['CM_TMP_PIP_VERSION_STRING'] = pip_version_string + r = _update_env(env, 'CM_TMP_PIP_VERSION_STRING', pip_version_string) + if r['return']>0: return r if pip_version_string != '': logging.debug(recursion_spaces+' # potential PIP version string (if needed): '+pip_version_string) @@ -4547,9 +4570,12 @@ def prepare_and_run_script_with_postprocessing(i, postprocess="postprocess"): path = '"' + path + '"' cur_dir = 
os.getcwd() - - env['CM_TMP_CURRENT_SCRIPT_PATH'] = path - env['CM_TMP_CURRENT_SCRIPT_WORK_PATH'] = cur_dir + + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_PATH', path) + if r['return']>0: return r + + r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_WORK_PATH', cur_dir) + if r['return']>0: return r # Record state if tmp_file_state != '': From 4f6ac44f080c0bcdc3561bfba8fa84e99bd1dc94 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 18 Sep 2024 00:06:53 +0530 Subject: [PATCH 05/16] avoided unnecessary env updates --- automation/script/module.py | 50 +++++++++++++------------------------ 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index 9094b18a95..ad5a22f4ea 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -347,9 +347,7 @@ def _run(self, i): fake_run = i.get('fake_run', False) fake_run = i.get('fake_run', False) if 'fake_run' in i else i.get('prepare', False) - if fake_run: - r = _update_env(env, 'CM_TMP_FAKE_RUN', 'yes') - if r['return']>0: return r + if fake_run: env['CM_TMP_FAKE_RUN']='yes' debug_uid = i.get('debug_uid', '') if debug_uid!='': @@ -357,16 +355,12 @@ def _run(self, i): if r['return']>0: return r fake_deps = i.get('fake_deps', False) - if fake_deps: - r = _update_env(env, 'CM_TMP_FAKE_DEPS', 'yes') - if r['return']>0: return r + if fake_deps: env['CM_TMP_FAKE_DEPS']='yes' if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: - r = _update_env(env, 'CM_SKIP_SYS_UTILS', 'yes') - if r['return']>0: return r + env['CM_SKIP_SYS_UTILS']='yes' if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: - r = _update_env(env, 'CM_TMP_SKIP_SUDO', 'yes') - if r['return']>0: return r + env['CM_TMP_SKIP_SUDO']='yes' run_state = i.get('run_state', self.run_state) if not run_state.get('version_info', []): @@ -387,16 +381,14 @@ def _run(self, i): if silent: if 'verbose' in i: del(i['verbose']) if 'v' in i: del(i['v']) - r = _update_env(env, 'CM_TMP_SILENT', 'yes') - if r['return']>0: return r + env['CM_TMP_SILENT']='yes' run_state['tmp_silent']=True if 'verbose' in i: verbose=i['verbose'] elif 'v' in i: verbose=i['v'] if verbose: - r = _update_env(env, 'CM_VERBOSE', 'yes') - if r['return']>0: return r + env['CM_VERBOSE']='yes' run_state['tmp_verbose']=True logging.getLogger().setLevel(logging.DEBUG) @@ -432,15 +424,11 @@ def _run(self, i): # Check if quiet mode quiet = i.get('quiet', False) if 'quiet' in i else (env.get('CM_QUIET','').lower() == 'yes') - if quiet: - r = _update_env(env, 'CM_QUIET', 'yes') - if r['return']>0: return r + if quiet: env['CM_QUIET'] = 'yes' skip_remembered_selections = i.get('skip_remembered_selections', False) if 'skip_remembered_selections' in i \ else (env.get('CM_SKIP_REMEMBERED_SELECTIONS','').lower() == 'yes') - if skip_remembered_selections: - r = _update_env(env, 'CM_SKIP_REMEMBERED_SELECTIONS', 'yes') - if r['return']>0: return r + if skip_remembered_selections: env['CM_SKIP_REMEMBERED_SELECTIONS'] = 'yes' # Prepare debug info parsed_script = i.get('parsed_artifact') @@ -478,15 +466,16 @@ def _run(self, i): for key in self.input_flags_converted_to_tmp_env: value = i.get(key, '').strip() if value != '': - r = _update_env(env, 'CM_TMP_' + key.upper(), value) - if r['return']>0: return r + env['CM_TMP_' + key.upper()] = value for key in self.input_flags_converted_to_env: value = i.get(key, '') if type(value)==str: value=value.strip() if value != '': - r = _update_env(env, 'CM_' + key.upper(), 
value) - if r['return']>0: return r + env['CM_' + key.upper()] = value + + r = update_env_with_values(env) + if r['return']>0: return r ############################################################################################################ @@ -776,11 +765,8 @@ def _run(self, i): if script_artifact.repo_meta.get('prefix', '') != '': script_repo_path_with_prefix = os.path.join(script_repo_path, script_artifact.repo_meta['prefix']) - r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_REPO_PATH', script_repo_path) - if r['return']>0: return r - - r = _update_env(env, 'CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX', script_repo_path_with_prefix) - if r['return']>0: return r + env['CM_TMP_CURRENT_SCRIPT_REPO_PATH'] = script_repo_path + env['CM_TMP_CURRENT_SCRIPT_REPO_PATH_WITH_PREFIX'] = script_repo_path_with_prefix # Check if has --help if i.get('help',False): @@ -991,8 +977,7 @@ def _run(self, i): elif str(state['docker'].get('real_run', True)).lower() in ['false', '0', 'no']: logging.info(recursion_spaces+' - Doing fake run for script::{} as we are inside docker'.format(found_script_artifact)) fake_run = True - r = _update_env(env, 'CM_TMP_FAKE_RUN', 'yes') - if r['return']>0: return r + env['CM_TMP_FAKE_RUN']='yes' @@ -1283,8 +1268,7 @@ def _run(self, i): found_cached = False remove_tmp_tag = True - r = _update_env(env, 'CM_RENEW_CACHE_ENTRY', 'yes') - if r['return']>0: return r + env['CM_RENEW_CACHE_ENTRY']='yes' # Prepare files to be cleaned clean_files = [self.tmp_file_run_state, From 57c5f24814b47a59de25b13f433d266407a84527 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 18 Sep 2024 00:09:39 +0530 Subject: [PATCH 06/16] Fixed indentation --- automation/script/module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/script/module.py b/automation/script/module.py index ad5a22f4ea..94de87aa9a 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -358,7 +358,7 @@ def _run(self, i): if fake_deps: env['CM_TMP_FAKE_DEPS']='yes' if str(i.get('skip_sys_utils', '')).lower() in ['true', 'yes']: - env['CM_SKIP_SYS_UTILS']='yes' + env['CM_SKIP_SYS_UTILS']='yes' if str(i.get('skip_sudo', '')).lower() in ['true', 'yes']: env['CM_TMP_SKIP_SUDO']='yes' From 238f0ca0ecd4d9d0d721eb962932a78c49bf3cb4 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 18 Sep 2024 09:19:32 +0530 Subject: [PATCH 07/16] code clean From d4505afb837d1466ae30cb2323073825a0ca1834 Mon Sep 17 00:00:00 2001 From: anandhu-eng Date: Wed, 18 Sep 2024 09:40:57 +0530 Subject: [PATCH 08/16] test commit-fix indent --- automation/script/module.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index 94de87aa9a..38d17a9136 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -387,10 +387,11 @@ def _run(self, i): if 'verbose' in i: verbose=i['verbose'] elif 'v' in i: verbose=i['v'] + if verbose: - env['CM_VERBOSE']='yes' - run_state['tmp_verbose']=True logging.getLogger().setLevel(logging.DEBUG) + run_state['tmp_verbose'] = True + env['CM_VERBOSE'] = 'yes' print_deps = i.get('print_deps', False) From 0f377b345dc5ee9f21b33060f43cb412174e977d Mon Sep 17 00:00:00 2001 From: anandhu-eng Date: Wed, 18 Sep 2024 09:46:50 +0530 Subject: [PATCH 09/16] Revert "test commit-fix indent" This reverts commit d4505afb837d1466ae30cb2323073825a0ca1834. 
--- automation/script/module.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/automation/script/module.py b/automation/script/module.py index 38d17a9136..94de87aa9a 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -387,11 +387,10 @@ def _run(self, i): if 'verbose' in i: verbose=i['verbose'] elif 'v' in i: verbose=i['v'] - if verbose: + env['CM_VERBOSE']='yes' + run_state['tmp_verbose']=True logging.getLogger().setLevel(logging.DEBUG) - run_state['tmp_verbose'] = True - env['CM_VERBOSE'] = 'yes' print_deps = i.get('print_deps', False) From e18f64c7417d530d4fffe641bc166ab7f45fa3a0 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 18 Sep 2024 12:01:57 +0530 Subject: [PATCH 10/16] added error flag if no files are transferred --- script/download-file/customize.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/script/download-file/customize.py b/script/download-file/customize.py index 3a1921ebf0..becf4ab420 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -168,19 +168,9 @@ def preprocess(i): # have to modify the variable from url to temp_url if it is going to be used anywhere after this point url = url.replace("%", "%%") temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace("%", "%%") - env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P" + env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P --error-on-no-transfer" else: - env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P" - for i in range(1,5): url = env.get('CM_DOWNLOAD_URL'+str(i),'') if url == '': break if env["CM_HOST_OS_TYPE"] == "windows": url = url.replace("%", "%%") temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace("%", "%%") env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P)" else: env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P)" + env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P --error-on-no-transfer" filename = env['CM_DOWNLOAD_FILENAME'] env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename From 7a08bfe45a26138f1646a51df2e6702fb872a3ab Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Wed, 18 Sep 2024 12:02:48 +0530 Subject: [PATCH 11/16] remove file only if PRE_DOWNLOAD_CLEAN env is set --- script/download-file/run.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/script/download-file/run.sh b/script/download-file/run.sh index 91f5428c9d..8fdefc7502 100644 --- a/script/download-file/run.sh +++ b/script/download-file/run.sh @@ -35,8 +35,10 @@ fi if [[ ${require_download} == "1" ]]; then echo "" - echo ${CM_PRE_DOWNLOAD_CLEAN_CMD} - ${CM_PRE_DOWNLOAD_CLEAN_CMD} + if [ -e "${CM_PRE_DOWNLOAD_CLEAN}" ]; then + echo ${CM_PRE_DOWNLOAD_CLEAN_CMD} + ${CM_PRE_DOWNLOAD_CLEAN_CMD} + fi echo "" echo "${CM_DOWNLOAD_CMD}" From 45b6981fa39b85a3a3ac5dcb9df4d05007ae17c4 Mon Sep 17 00:00:00 2001 From: Arjun Suresh 
Date: Wed, 18 Sep 2024 17:34:42 +0530 Subject: [PATCH 12/16] Fix typo in Nvidia mlperf inference app --- script/app-mlperf-inference-nvidia/_cm.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml index 6a1d6bdf46..390bde0ce6 100644 --- a/script/app-mlperf-inference-nvidia/_cm.yaml +++ b/script/app-mlperf-inference-nvidia/_cm.yaml @@ -783,7 +783,7 @@ variations: CM_MLPERF_NVIDIA_HARNESS_MAXN: yes preprocess-data: - alias: preprocess-data + alias: preprocess_data preprocess_data: group: run-mode From 18a55b85191cdbdf521d7964fbcb95956ad7b7e0 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 18 Sep 2024 15:03:13 +0100 Subject: [PATCH 13/16] Added get-rclone-config script for mlc-inference #172 --- script/get-rclone-config/_cm.yaml | 13 +++++++++++++ script/get-rclone-config/customize.py | 25 +++++++++++++++++++++++++ script/get-rclone-config/run.bat | 1 + script/get-rclone-config/run.sh | 17 +++++++++++++++++ 4 files changed, 56 insertions(+) create mode 100644 script/get-rclone-config/_cm.yaml create mode 100644 script/get-rclone-config/customize.py create mode 100644 script/get-rclone-config/run.bat create mode 100644 script/get-rclone-config/run.sh diff --git a/script/get-rclone-config/_cm.yaml b/script/get-rclone-config/_cm.yaml new file mode 100644 index 0000000000..a8fa322185 --- /dev/null +++ b/script/get-rclone-config/_cm.yaml @@ -0,0 +1,13 @@ +alias: get-rclone-config +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: false #keeping cache off as rerunning the command is safe +can_force_cache: true +tags: +- get +- rclone-config +uid: 6c59ddbc6cd046e3 +variations: + mlc-inference: + env: + CM_RCLONE_CONFIG_CMD: 'rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com' diff --git a/script/get-rclone-config/customize.py b/script/get-rclone-config/customize.py new file mode 100644 index 0000000000..92ac95147d --- /dev/null +++ b/script/get-rclone-config/customize.py @@ -0,0 +1,25 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + env = i['env'] + + meta = i['meta'] + + automation = i['automation'] + + quiet = (env.get('CM_QUIET', False) == 'yes') + + if env.get('CM_RCLONE_CONFIG_CMD', '') != '': + env['CM_RUN_CMD'] = env['CM_RCLONE_CONFIG_CMD'] + + return {'return':0} + +def postprocess(i): + + env = i['env'] + + return {'return':0} diff --git a/script/get-rclone-config/run.bat b/script/get-rclone-config/run.bat new file mode 100644 index 0000000000..648302ca71 --- /dev/null +++ b/script/get-rclone-config/run.bat @@ -0,0 +1 @@ +rem native script diff --git a/script/get-rclone-config/run.sh b/script/get-rclone-config/run.sh new file mode 100644 index 0000000000..4c23c380ea --- /dev/null +++ b/script/get-rclone-config/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +#CM Script location: ${CM_TMP_CURRENT_SCRIPT_PATH} + +#To export any variable +#echo "VARIABLE_NAME=VARIABLE_VALUE" >>tmp-run-env.out + +#${CM_PYTHON_BIN_WITH_PATH} contains the path to python binary if "get,python" is added as a dependency + +echo "Running: " +echo "${CM_RUN_CMD}" +echo "" + +if [[ ${CM_FAKE_RUN} != "yes" ]]; then + eval "${CM_RUN_CMD}" + test $? 
-eq 0 || exit 1 +fi From 96a5b2d4970868b53292cdae4f1158d72806c721 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 18 Sep 2024 15:17:18 +0100 Subject: [PATCH 14/16] Uses get-rclone-config script for MLC-inference configs --- script/download-file/_cm.json | 13 +++++++++++++ script/download-file/customize.py | 2 +- script/get-ml-model-dlrm-terabyte/_cm.json | 2 +- script/get-ml-model-gptj/_cm.json | 2 +- script/get-ml-model-stable-diffusion/_cm.json | 2 +- script/get-preprocessed-dataset-criteo/_cm.json | 2 +- script/get-preprocessed-dataset-openorca/_cm.json | 2 +- 7 files changed, 19 insertions(+), 6 deletions(-) diff --git a/script/download-file/_cm.json b/script/download-file/_cm.json index 259a7ed1b4..f9b2825643 100644 --- a/script/download-file/_cm.json +++ b/script/download-file/_cm.json @@ -83,6 +83,19 @@ "deps": [ { "tags": "get,rclone" + }, + { + "tags": "get,rclone-config", + "update_tags_from_env_with_prefix": { + "_": [ + "CM_RCLONE_CONFIG_NAME" + ] + }, + "enable_if_env": { + "CM_RCLONE_CONFIG_NAME": [ + "on" + ] + } } ], "env": { diff --git a/script/download-file/customize.py b/script/download-file/customize.py index becf4ab420..c400de4543 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -159,7 +159,7 @@ def preprocess(i): env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})" elif tool == "rclone": - if env.get('CM_RCLONE_CONFIG_CMD', '') != '': + if env.get('CM_RCLONE_CONFIG_CMD', '') != '': #keeping this for backward compatibility. Ideally should be done via get,rclone-config script env['CM_DOWNLOAD_CONFIG_CMD'] = env['CM_RCLONE_CONFIG_CMD'] rclone_copy_using = env.get('CM_RCLONE_COPY_USING', 'sync') if rclone_copy_using == "sync": diff --git a/script/get-ml-model-dlrm-terabyte/_cm.json b/script/get-ml-model-dlrm-terabyte/_cm.json index 07ef945d1f..553808932d 100644 --- a/script/get-ml-model-dlrm-terabyte/_cm.json +++ b/script/get-ml-model-dlrm-terabyte/_cm.json @@ -101,7 +101,7 @@ }, "pytorch,fp32,weight_sharded,rclone": { "env": { - "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_RCLONE_CONFIG_NAME": "mlc-inference", "CM_PACKAGE_URL": "mlc-inference:mlcommons-inference-wg-public/model_weights" } }, diff --git a/script/get-ml-model-gptj/_cm.json b/script/get-ml-model-gptj/_cm.json index 38629a793b..2b9c67e62a 100644 --- a/script/get-ml-model-gptj/_cm.json +++ b/script/get-ml-model-gptj/_cm.json @@ -87,7 +87,7 @@ "CM_UNZIP": "yes", "CM_DOWNLOAD_CHECKSUM_NOT_USED": "e677e28aaf03da84584bb3073b7ee315", "CM_PACKAGE_URL": "https://cloud.mlcommons.org/index.php/s/QAZ2oM94MkFtbQx/download", - "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_RCLONE_CONFIG_NAME": "mlc-inference", "CM_RCLONE_URL": "mlc-inference:mlcommons-inference-wg-public/gpt-j" }, "required_disk_space": 22700 diff --git a/script/get-ml-model-stable-diffusion/_cm.json b/script/get-ml-model-stable-diffusion/_cm.json index 39390a193d..2e062a080a 100644 --- a/script/get-ml-model-stable-diffusion/_cm.json +++ 
b/script/get-ml-model-stable-diffusion/_cm.json @@ -160,7 +160,7 @@ "rclone": { "group": "download-tool", "env": { - "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_RCLONE_CONFIG_NAME": "mlc-inference", "CM_DOWNLOAD_TOOL": "rclone" }, "adr": { diff --git a/script/get-preprocessed-dataset-criteo/_cm.json b/script/get-preprocessed-dataset-criteo/_cm.json index 337d93a102..e910d11282 100644 --- a/script/get-preprocessed-dataset-criteo/_cm.json +++ b/script/get-preprocessed-dataset-criteo/_cm.json @@ -191,7 +191,7 @@ ], "extra_cache_tags": "criteo,preprocessed,dataset", "env": { - "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", + "CM_RCLONE_CONFIG_NAME": "mlc-inference", "CM_RCLONE_URL": "mlc-inference:mlcommons-inference-wg-public/dlrm_preprocessed", "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_DATASET_PREPROCESSED_PATH", "CM_EXTRACT_FINAL_ENV_NAME": "CM_DATASET_PREPROCESSED_PATH", diff --git a/script/get-preprocessed-dataset-openorca/_cm.json b/script/get-preprocessed-dataset-openorca/_cm.json index 18317961f4..bce99bf63e 100644 --- a/script/get-preprocessed-dataset-openorca/_cm.json +++ b/script/get-preprocessed-dataset-openorca/_cm.json @@ -143,7 +143,6 @@ "mlcommons": { "env": { "CM_DATASET_PREPROCESSED_BY_MLC": "yes", - "CM_RCLONE_CONFIG_CMD": "rclone config create mlc-inference s3 provider=Cloudflare access_key_id=f65ba5eef400db161ea49967de89f47b secret_access_key=fbea333914c292b854f14d3fe232bad6c5407bf0ab1bebf78833c2b359bdfd2b endpoint=https://c2686074cb2caf5cbaf6d134bdba8b47.r2.cloudflarestorage.com", "CM_RCLONE_URL": "mlc-inference:mlcommons-inference-wg-public/open_orca" }, "deps": [ @@ -152,6 +151,7 @@ "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_OPENORCA_PREPROCESSED_ROOT", "CM_EXTRACT_FINAL_ENV_NAME": "CM_OPENORCA_PREPROCESSED_ROOT", "CM_EXTRACT_TO_FOLDER": "openorca-preprocessed" + "CM_RCLONE_CONFIG_NAME": "mlc-inference" }, "tags": "download-and-extract,_rclone", "update_tags_from_env_with_prefix": { From f724f597a4d5f711e51ccc7da5147a6b2158d58c Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 18 Sep 2024 15:19:47 +0100 Subject: [PATCH 15/16] Uses get-rclone-config script for MLC-inference configs --- script/get-preprocessed-dataset-openorca/_cm.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/get-preprocessed-dataset-openorca/_cm.json b/script/get-preprocessed-dataset-openorca/_cm.json index bce99bf63e..e3fa7ed110 100644 --- a/script/get-preprocessed-dataset-openorca/_cm.json +++ b/script/get-preprocessed-dataset-openorca/_cm.json @@ -150,7 +150,7 @@ "env": { "CM_DOWNLOAD_FINAL_ENV_NAME": "CM_OPENORCA_PREPROCESSED_ROOT", "CM_EXTRACT_FINAL_ENV_NAME": "CM_OPENORCA_PREPROCESSED_ROOT", - "CM_EXTRACT_TO_FOLDER": "openorca-preprocessed" + "CM_EXTRACT_TO_FOLDER": "openorca-preprocessed", "CM_RCLONE_CONFIG_NAME": "mlc-inference" }, "tags": "download-and-extract,_rclone", From 6c85bc21f6a3da407de8b39b1d6405d9264763b7 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Wed, 18 Sep 2024 15:46:26 +0100 Subject: [PATCH 16/16] Reduced the test_query_count for ABTF gh action --- 
.github/workflows/test-mlperf-inference-abtf-poc.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-mlperf-inference-abtf-poc.yml b/.github/workflows/test-mlperf-inference-abtf-poc.yml index bb5ab71b20..ffd9142ab5 100644 --- a/.github/workflows/test-mlperf-inference-abtf-poc.yml +++ b/.github/workflows/test-mlperf-inference-abtf-poc.yml @@ -37,7 +37,7 @@ jobs: cm pull repo mlcommons@cm4abtf --branch=poc - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on docker run: | - cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=5 --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v + cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=2 --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v build2: runs-on: ${{ matrix.os }} @@ -62,7 +62,7 @@ jobs: cm pull repo mlcommons@cm4abtf --branch=poc - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }} run: | - cm run script --tags=run-abtf,inference,_poc-demo --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v + cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=2 --adr.compiler.tags=gcc --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 --quiet -v build3: runs-on: ${{ matrix.os }} @@ -89,4 +89,4 @@ jobs: cm pull repo mlcommons@cm4abtf --branch=poc - name: Test MLPerf Inference ABTF POC using ${{ matrix.backend }} on ${{ matrix.os }} run: | - cm run script --tags=run-abtf,inference,_poc-demo --quiet --env.CM_MLPERF_LOADGEN_BUILD_FROM_SRC=off --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 -v + cm run script --tags=run-abtf,inference,_poc-demo --test_query_count=2 --quiet --env.CM_MLPERF_LOADGEN_BUILD_FROM_SRC=off --adr.cocoeval.version_max=1.5.7 --adr.cocoeval.version_max_usable=1.5.7 -v