v2.0.3 release #1182

Merged · 9 commits · Mar 20, 2024
3 changes: 2 additions & 1 deletion cm-mlops/cfg/benchmark-hardware-compute/google-tpu.json
@@ -1,5 +1,6 @@
 {
   "uid": "b3be7ac9ef954f5a",
   "name": "Google TPU",
-  "tags": "tpu,google"
+  "tags": "tpu,google",
+  "mlperf_inference_device": "tpu"
 }
6 changes: 6 additions & 0 deletions cm-mlops/cfg/benchmark-hardware-compute/habana-gaudi.json
@@ -0,0 +1,6 @@
+{
+  "uid": "a42388a2a8cd412c",
+  "name": "Habana Gaudi 2",
+  "tags": "gaudi,habana",
+  "mlperf_inference_device": "gaudi"
+}
1 change: 1 addition & 0 deletions cm-mlops/cfg/benchmark-list/mlperf-inference.yaml
@@ -25,3 +25,4 @@ supported_compute:
 - gpu,amd
 - accelerator,acc,qualcomm,ai,100,ai-100
 - tpu,google
+- gaudi,habana
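For context, each `supported_compute` entry is a tag list matched against the `tags` field of a compute description such as the two JSON files above (the new `gaudi,habana` entry pairs with habana-gaudi.json). A minimal sketch of that pairing, assuming matching is a plain tag-subset test (the actual CM matching logic may differ):

```python
# Assumption: a compute description is considered supported when every tag
# in the benchmark's supported_compute entry appears in the description's tags.
compute = {'name': 'Habana Gaudi 2', 'tags': 'gaudi,habana',
           'mlperf_inference_device': 'gaudi'}
supported_compute = ['gpu,amd', 'tpu,google', 'gaudi,habana']

def matches(entry, tags_str):
    return set(entry.split(',')) <= set(tags_str.split(','))

print(any(matches(e, compute['tags']) for e in supported_compute))  # -> True
```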
6 changes: 4 additions & 2 deletions cm-mlops/script/gui/playground_howtorun.py
@@ -223,6 +223,7 @@ def page(st, params, action = ''):
     ############################################################################################
     # Check if has customization
     extra = {}
+    skip = False
 
     script_tags = script_meta.get('tags_help','')
     if script_tags =='':
@@ -265,11 +266,12 @@ def page(st, params, action = ''):
         r = func(ii)
         if r['return'] > 0 : return r
 
-        extra = r.get('extra',{})
+        extra = r.get('extra', {})
+        skip = r.get('skip', False)
 
     ############################################################################################
     # Show official GUI
-    if script_path!='':
+    if script_path!='' and not skip:
         import script
 
         ii = {'st': st,
3 changes: 3 additions & 0 deletions cm-mlops/script/gui/script.py
@@ -349,6 +349,9 @@ def page(i):
     if len(meta.get('docker',{}))>0:
         run_via_docker = st.toggle('Use Docker', key='run_via_docker', value=False)
 
+    if run_via_docker:
+        st.markdown("*WARNING: CM automatically generates containers for a given script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*")
+
     action = 'docker' if run_via_docker else 'run'
     cli = 'cm {} script {} {}\n'.format(action, tags, flags)
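For context, the toggle above only swaps the CM action in the generated command line, turning `cm run script ...` into `cm docker script ...`. A minimal sketch of that string construction, with hypothetical `tags` and `flags` values (the real ones come from the GUI state):

```python
# Sketch of the CLI construction in script.py above; the tags/flags values
# are hypothetical placeholders, not taken from an actual GUI session.
run_via_docker = True

tags = '"run-mlperf,inference"'
flags = '--device=cpu'

action = 'docker' if run_via_docker else 'run'
cli = 'cm {} script {} {}\n'.format(action, tags, flags)

print(cli)  # -> cm docker script "run-mlperf,inference" --device=cpu
```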
55 changes: 43 additions & 12 deletions cm-mlops/script/run-mlperf-inference-app/customize.py
@@ -338,6 +338,7 @@ def gui(i):
     script_tags = i.get('script_tags', '')
 
     compute_meta = i.get('compute_meta',{})
+    compute_tags = compute_meta.get('tags', [])
     bench_meta = i.get('bench_meta',{})
 
     compute_uid = compute_meta.get('uid','')
@@ -352,27 +353,40 @@ def gui(i):
     inp = script_meta['input_description']
 
     # Here we can update params
-    st.markdown('---')
-    st.markdown('**How would you like to run the MLPerf inference benchmark?**')
-
-
     v = compute_meta.get('mlperf_inference_device')
     if v!=None and v!='':
         inp['device']['force'] = v
 
+        if v in ['tpu', 'gaudi']:
+            st.markdown('----')
+            st.markdown('**WARNING: unified CM workflow support for this hardware is pending - please [feel free to help](https://discord.gg/JjWNWXKxwT)!**')
+            return {'return':0, 'skip': True, 'end_html':end_html}
+
+    st.markdown('---')
+    st.markdown('**How would you like to run the MLPerf inference benchmark?**')
+
     r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_device', 'desc':inp['device']})
     device = r.get('value2')
     inp['device']['force'] = device
 
 
     if device == 'cpu':
         inp['implementation']['choices']=['mlcommons-python', 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite']
-        inp['implementation']['default']='mlcommons-python'
-        inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx']
-        inp['backend']['default']='onnxruntime'
+        if 'intel' in compute_tags:
+            inp['implementation']['default']='intel'
+        else:
+            inp['implementation']['default']='mlcommons-python'
+        inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx']
+        inp['backend']['default']='onnxruntime'
     elif device == 'rocm':
         inp['implementation']['force']='mlcommons-python'
         inp['precision']['choices']=['']
         inp['precision']['force']=''
         inp['backend']['force']='onnxruntime'
+        st.markdown('*WARNING: CM-MLPerf inference workflow was not tested thoroughly for AMD GPU - please feel free to test and improve!*')
     elif device == 'qaic':
         inp['implementation']['force']='qualcomm'
         inp['precision']['force']=''
         inp['backend']['force']='glow'

@@ -433,7 +447,7 @@ def gui(i):
             inp['model']['choices'] = ['resnet50', 'retinanet']
             st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-mlcommons-cpp)]*')
         elif implementation == 'mlcommons-python':
-            inp['precision']['default']='float32'
+            inp['precision']['force']='float32'
             if device == 'cuda':
                 inp['backend']['choices']=['onnxruntime','pytorch','tf']
                 inp['backend']['default'] = 'onnxruntime'
@@ -442,14 +456,14 @@ def gui(i):
             inp['precision']['force']='float32'
             inp['model']['force']='resnet50'
             st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite)]*')
-
         elif implementation == 'nvidia':
             inp['backend']['force'] = 'tensorrt'
             st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia)]*')
         elif implementation == 'intel':
-            inp['model']['choices'] = ['bert-99', 'bert-99.9', 'gptj-99']
+            inp['model']['choices'] = ['bert-99', 'gptj-99']
             inp['model']['default'] = 'bert-99'
-            inp['precision']['force'] = 'uint8'
+            inp['precision']['choices'] = ['int8', 'int4']
+            inp['precision']['default'] = 'int8'
             inp['category']['force'] = 'datacenter'
             inp['backend']['force'] = 'pytorch'
             # st.markdown('*:red[Note: Intel implementation require extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*')
@@ -475,10 +489,14 @@ def gui(i):
         if backend == 'deepsparse':
             inp['model']['choices'] = ['resnet50', 'retinanet', 'bert-99', 'bert-99.9']
             inp['model']['default'] = 'bert-99'
+            inp['precision']['choices'] = ['float32', 'int8']
+            inp['precision']['default'] = 'int8'
+            if 'force' in inp['precision']: del(inp['precision']['force'])
 
 
 
         #############################################################################
+        # Model
         r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_model', 'desc':inp['model']})
         model = r.get('value2')
         inp['model']['force'] = model
@@ -514,7 +532,20 @@ def gui(i):
 
         if github_doc_model == '': github_doc_model = model
 
-        extra_notes_online = '[Extra notes online](https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{})\n'.format(github_doc_model)
+        model_cm_url='https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(github_doc_model)
+        extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url)
+
+        st.markdown('*[CM GitHub docs for this model]({})*'.format(model_cm_url))
+
+        #############################################################################
+        # Precision
+        if implementation == 'intel':
+            if model == 'bert-99':
+                inp['precision']['force'] = 'int8'
+            elif model == 'gptj-99':
+                inp['precision']['force'] = 'int4'
+
 
         r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_precision', 'desc':inp['precision']})
         precision = r.get('value2')
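Throughout this file, each `inp` entry drives one GUI selector: `choices` restricts the options, `default` preselects one, and `force` pins the value outright, which is why the diff keeps switching between these keys per device, implementation, and model. A self-contained sketch of that resolution order, using a hypothetical stand-in for `misc.make_selector` (the real function also renders the Streamlit widget):

```python
def resolve_input(desc, user_value=None):
    # Hypothetical stand-in for misc.make_selector's value resolution:
    # 'force' pins the value, a valid user choice wins next, then 'default'.
    if 'force' in desc:
        return desc['force']
    if user_value is not None and user_value in desc.get('choices', [user_value]):
        return user_value
    return desc.get('default')

inp = {'precision': {'choices': ['float32', 'int8'], 'default': 'int8'}}
print(resolve_input(inp['precision']))             # -> 'int8' (default)

inp['precision']['force'] = 'int4'
print(resolve_input(inp['precision'], 'float32'))  # -> 'int4' (forced)
```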
5 changes: 5 additions & 0 deletions cm/CHANGES.md
@@ -1,3 +1,8 @@
+## V2.0.3
+- added support to handle broken CM repositories: https://github.com/mlcommons/ck/issues/1177
+- added "cm checkout repo mlcommons@ck --branch=dev" to make it easier to switch branches
+- added "cm import repo" to import the repository in the current directory
+
 ## V2.0.2
 - added support to update all CM Git repos in one go: "cm pull repo"
 - added support to show extra info about CM Git repos: "cm show repo"
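For readers trying the new v2.0.3 repo actions, the CLI calls should map onto the Python API in the usual CM way; a hedged sketch (the input keys mirror the CLI flags and are an assumption here, not documented API):

```python
import cmind

# Assumed equivalent of: cm checkout repo mlcommons@ck --branch=dev
r = cmind.access({'action': 'checkout',
                  'automation': 'repo',
                  'artifact': 'mlcommons@ck',
                  'branch': 'dev'})
if r['return'] > 0:
    print(r.get('error', ''))

# Assumed equivalent of: cm import repo (run from the repository directory)
r = cmind.access({'action': 'import',
                  'automation': 'repo'})
```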
2 changes: 1 addition & 1 deletion cm/cmind/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.0.2"
+__version__ = "2.0.3"
 
 from cmind.core import access
 from cmind.core import error
5 changes: 4 additions & 1 deletion cm/cmind/core.py
@@ -354,6 +354,8 @@ def access(self, i, out = None):
             elif not utils.is_cm_uid(xuid):
                 return {'return':1, 'error':'you must use CM UID after automation {} when using --common'.format(parsed_automation[0][0])}
 
+        automation_meta = {}
+
         if automation != '' and not use_common_automation:
             # If wildcards in automation, use the common one (usually for search across different automations)
             # However, still need above "parse_automation" for proper search
@@ -458,7 +460,8 @@ def access(self, i, out = None):
 
         if action in self.cfg['action_substitutions']:
             action = self.cfg['action_substitutions'][action]
-
+        elif action in automation_meta.get('action_substitutions',{}):
+            action = automation_meta['action_substitutions'][action]
 
         # Check if common automation and --help
         if (use_common_automation or automation=='') and cm_help:
3 changes: 2 additions & 1 deletion cm/cmind/repo.py
@@ -63,7 +63,8 @@ def load(self):
 
             r = utils.load_yaml_and_json(file_name_without_ext = full_path)
             if r['return'] >0:
-                r['error']='CM repository is broken ({})'.format(r['error'])
+                r['error'] = 'CM repository is broken ({})'.format(r['error'])
+                r['return'] = 16
                 return r
 
         self.meta = r['meta']
3 changes: 3 additions & 0 deletions cm/cmind/repo/automation/repo/_cm.json
@@ -1,4 +1,7 @@
 {
+  "action_substitutions": {
+    "import":"ximport"
+  },
   "alias": "repo",
   "automation_alias": "automation",
   "automation_uid": "bbeb15d8f0a944a4",
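This new `action_substitutions` block is what wires `cm import repo` to the `ximport` function added in module.py below: `import` is a reserved word in Python, so the automation declares a substitution and core.py (see the change above) remaps the action name before dispatch. A minimal sketch of that lookup, using the meta from this file:

```python
# Minimal sketch of the action remapping added in core.py above,
# using the automation meta from this _cm.json.
automation_meta = {'action_substitutions': {'import': 'ximport'}}

action = 'import'  # 'import' cannot be used as a Python method name
if action in automation_meta.get('action_substitutions', {}):
    action = automation_meta['action_substitutions'][action]

print(action)  # -> 'ximport', the method actually defined in module.py
```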
54 changes: 47 additions & 7 deletions cm/cmind/repo/automation/repo/module.py
@@ -27,6 +27,7 @@ def pull(self, i):
            (pat) (str): Personal Access Token (if supported and url=='')
            (branch) (str): Git branch
            (checkout) (str): Git checkout
+           (checkout_only) (bool): only checkout existing repo
            (depth) (int): Git depth
            (desc) (str): brief repository description (1 line)
            (prefix) (str): extra directory to keep CM artifacts
@@ -48,6 +49,8 @@ def pull(self, i):
         prefix = i.get('prefix','')
         pat = i.get('pat','')
 
+        checkout_only = i.get('checkout_only', False)
+
         if url == '':
             if alias != '':
                 url = self.cmind.cfg['repo_url_prefix']
@@ -80,7 +83,7 @@ def pull(self, i):
 
         if url == '':
             pull_repos = []
-            
+
             for repo in sorted(self.cmind.repos.lst, key = lambda x: x.meta.get('alias','')):
                 meta = repo.meta
 
@@ -89,7 +92,7 @@ def pull(self, i):
                 # Pick it up from the path
 
                 repo_path = repo.path
-                
+
                 pull_repos.append({'alias': os.path.basename(repo_path),
                                    'path_to_repo': repo_path})
         else:
@@ -127,15 +130,23 @@ def pull(self, i):
 
         # Prepare path to repo
         repos = self.cmind.repos
-
-        r = repos.pull(alias = alias, url = url, branch = branch, checkout = checkout, console = console,
-                       desc=desc, prefix=prefix, depth=depth, path_to_repo=path_to_repo)
-
+        r = repos.pull(alias = alias,
+                       url = url,
+                       branch = branch,
+                       checkout = checkout,
+                       console = console,
+                       desc=desc,
+                       prefix=prefix,
+                       depth=depth,
+                       path_to_repo=path_to_repo,
+                       checkout_only=checkout_only)
         if r['return']>0: return r
 
         repo_meta = r['meta']
 
         repo_metas[alias] = repo_meta
 
         if len(pull_repos)>0 and self.cmind.use_index:
             if console:
                 print (self.cmind.cfg['line'])
@@ -145,6 +156,28 @@ def pull(self, i):
 
         return {'return':0, 'meta':repo_meta, 'metas': repo_metas}
 
+
+
+    ############################################################
+    def checkout(self, i):
+        """
+        Checkout repository
+
+        Args:
+           (branch) (str): branch name
+           (checkout) (str): checkout
+
+           See "pull" action
+
+        Returns:
+           See "pull" action
+        """
+
+        i['checkout_only'] = True
+
+        return self.pull(i)
+
+
     ############################################################
     def show(self, i):
         """
@@ -162,7 +195,6 @@ def show(self, i):
         return self.search(i)
 
 
-
     ############################################################
     def search(self, i):
         """
@@ -414,6 +446,14 @@ def delete(self, i):
 
         return r
 
+    ############################################################
+    def ximport(self, i):
+
+        if i.get('path','')!='':
+            i['here']=True
+
+        return self.init(i)
+
     ############################################################
     def init(self, i):
         """