diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/404.html b/404.html
new file mode 100644
index 0000000000..19de19aa02
--- /dev/null
+++ b/404.html
@@ -0,0 +1,588 @@
+ [generated MkDocs page "Collective Mind Documentation": HTML head, theme assets and navigation omitted; the page text follows]
404 - Not found
\ No newline at end of file
diff --git a/_generator/api/conf.py b/_generator/api/conf.py
new file mode 100644
index 0000000000..fdd33bae7b
--- /dev/null
+++ b/_generator/api/conf.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, as shown here.
+#
+import os
+import sys
+import re
+
+sys.path.insert(0, os.path.abspath('..'))
+sys.path.insert(0, os.path.abspath('_ext'))
+sys.path.insert(0, os.path.abspath('.'))
+
+# -- Project information -----------------------------------------------------
+
+project = u'Collective Mind (CM)'
+copyright = u'2022-2024 MLCommons'
+author = u'Grigori Fursin'
+
+version = ''
+release = version
+
+edit_on_github_url = 'https://github.com'
+edit_on_github_project = 'mlcommons/ck/tree/master/docs/'
+
+# Parse the CM version directly from cmind/__init__.py so the docs
+# always match the installed package.
+kernel_file = os.path.join('..', '..', '..', 'cm', 'cmind', '__init__.py')
+
+with open(kernel_file, encoding="utf-8") as f:
+    search = re.search(r'__version__ = ["\']([^"\']+)', f.read())
+
+    if not search:
+        raise ValueError("We can't find the Collective Mind version in cmind/__init__.py")
+
+    version = search.group(1)
+    release = version  # keep the full release string in sync with the parsed version
+
+
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.todo',
+    'sphinx.ext.coverage',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.ifconfig',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.githubpages',
+    'recommonmark',
+    'sphinx.ext.napoleon',
+    'sphinx_markdown_tables'
+]
+
+autosummary_generate = True
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = False
+napoleon_include_init_with_doc = False
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = True
+napoleon_use_admonition_for_notes = True
+napoleon_use_admonition_for_references = True
+napoleon_use_ivar = False
+napoleon_use_param = False
+napoleon_use_rtype = False
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+#
+source_suffix = ['.rst', '.md', '.html']
+#source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+# language =
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+html_show_sourcelink = False
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+html_theme_options = {
+    'style_nav_header_background': 'black',
+    'collapse_navigation': False,
+    'style_external_links': True,
+    'analytics_id': 'UA-5727962-14',  # Provided by Google in your dashboard
+}
+
+html_context = {
+    "display_github": True,
+    "github_user": "mlcommons",
+    "github_repo": "ck",
+    "github_version": "master/docs/",
+}
+
+html_logo = 'static/logo.png'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# The default sidebars (for documents that don't match any pattern) are
+# defined by the theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'CMindDoc'
+
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    'papersize': 'a4paper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #
+    'preamble': '',
+
+    # LaTeX figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'cmind.tex', u'Collective Mind',
+     u'Grigori Fursin', 'manual'),
+]
+
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'cmind', u'Collective Mind',
+     [author], 1)
+]
+
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category).
+texinfo_documents = [
+    (master_doc, 'cmind', u'Collective Mind', author, 'CM',
+     'common-research-language, reusability, automation, mlops2devops, mlops, devops',
+     'Miscellaneous'),
+]
+
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be an ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+
+# -- Extension configuration -------------------------------------------------
+
+# -- Options for intersphinx extension ---------------------------------------
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'https://docs.python.org/': None}
+
+# -- Options for todo extension ----------------------------------------------
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+autoclass_content = 'both'
+
+def setup(app):
+#   app.add_stylesheet('css/custom.css')  # older Sphinx API
+    app.add_css_file('css/custom.css')
diff --git a/_generator/api/generate.bat b/_generator/api/generate.bat
new file mode 100644
index 0000000000..0904630e0c
--- /dev/null
+++ b/_generator/api/generate.bat
@@ -0,0 +1,16 @@
+@ECHO OFF
+
+rd /Q /S api
+
+sphinx-apidoc -f -T -o api ../../../cm/cmind
+
+cm replace_string_in_file utils --input=api/cmind.rst --string="cmind package" --replacement="CM python package API"
+
+sphinx-build -M html . api
+
+cd api/html
+tar cf api.tar *
+bzip2 api.tar
+
+move api.tar.bz2 ../..
+
diff --git a/_generator/api/generate.sh b/_generator/api/generate.sh
new file mode 100644
index 0000000000..8c92cefc4a
--- /dev/null
+++ b/_generator/api/generate.sh
@@ -0,0 +1,11 @@
+rm -rf api
+
+sphinx-apidoc -H "CM python package API" -f -T -o api ../../../cm/cmind
+
+sphinx-build -M html . api
+
+cd api/html
+tar cf api.tar *
+bzip2 api.tar
+
+mv api.tar.bz2 ../..
diff --git a/_generator/api/index.rst b/_generator/api/index.rst
new file mode 100644
index 0000000000..d621a5d48f
--- /dev/null
+++ b/_generator/api/index.rst
@@ -0,0 +1,13 @@
+.. Copyright 2021-2024 MLCommons
+
+.. CM documentation master file
+
+.. toctree::
+   :maxdepth: 2
+
+   api/cmind
+
+Index
+=====
+
+* :ref:`genindex`
diff --git a/_generator/api/static/css/custom.css b/_generator/api/static/css/custom.css
new file mode 100644
index 0000000000..528540242f
--- /dev/null
+++ b/_generator/api/static/css/custom.css
@@ -0,0 +1,6 @@
+/* adds scrollbar to sidenav */
+.wy-side-scroll {
+  width: auto;
+  scrollbar-width: thin;
+  overflow-y: auto;
+}
diff --git a/_generator/api/static/logo.png b/_generator/api/static/logo.png
new file mode 100644
index 0000000000..6a130f9e3d
Binary files /dev/null and b/_generator/api/static/logo.png differ
diff --git a/_generator/generate_api.bat b/_generator/generate_api.bat
new file mode 100644
index 0000000000..a1c92473b3
--- /dev/null
+++ b/_generator/generate_api.bat
@@ -0,0 +1,4 @@
+pip install sphinx recommonmark sphinx_markdown_tables sphinx_rtd_theme
+
+cd api
+generate.bat
diff --git a/_generator/generate_api.sh b/_generator/generate_api.sh
new file mode 100644
index 0000000000..85eb8d168a
--- /dev/null
+++ b/_generator/generate_api.sh
@@ -0,0 +1,4 @@
+pip install sphinx recommonmark sphinx_markdown_tables sphinx_rtd_theme
+
+cd api
+./generate.sh
diff --git a/_generator/generate_toc.cmd b/_generator/generate_toc.cmd
new file mode 100644
index 0000000000..e720d9afb9
--- /dev/null
+++ b/_generator/generate_toc.cmd
@@ -0,0 +1,38 @@
+cd ../tutorials
+
+cm create-toc-from-md utils --input=scc23-mlperf-inference-bert.md
+cm create-toc-from-md utils --input=sc22-scc-mlperf.md
+cm create-toc-from-md utils --input=sc22-scc-mlperf-part2.md
+cm create-toc-from-md utils --input=sc22-scc-mlperf-part3.md
+cm create-toc-from-md utils --input=mlperf-inference-submission.md
+cm create-toc-from-md utils --input=concept.md
+cm create-toc-from-md utils --input=reproduce-mlperf-tiny.md
+cm create-toc-from-md utils --input=automate-mlperf-tiny.md
+cm create-toc-from-md utils --input=reproduce-mlperf-training.md
+cm create-toc-from-md utils --input=common-interface-to-reproduce-research-projects.md
+
+cd ../artifact-evaluation
+
+cm create-toc-from-md utils --input=faq.md
+
+cd ../
+
+cm create-toc-from-md utils --input=taskforce.md
+cm create-toc-from-md utils --input=installation.md
+cm create-toc-from-md utils --input=faq.md
+cm create-toc-from-md utils --input=README.md
+cm create-toc-from-md utils --input=getting-started.md
+
+cd mlperf/inference
+
+cm create-toc-from-md utils --input=README.md
+
+cd ../../../
+cd cm-mlops/project/mlperf-inference-v3.0-submissions/docs
+cm create-toc-from-md utils --input=crowd-benchmark-mlperf-bert-inference-cuda.md
+
+cd ../../../automation/script
+cm create-toc-from-md utils --input=README-extra.md
+
+cd ../experiment
+cm create-toc-from-md utils --input=README-extra.md
diff --git a/_generator/list_automations.cmd b/_generator/list_automations.cmd
new file mode 100644
index 0000000000..5aa98b84b2
--- /dev/null
+++ b/_generator/list_automations.cmd
@@ -0,0 +1 @@
+cm doc automation --output_dir=..
\ No newline at end of file
diff --git a/_generator/list_scripts.cmd b/_generator/list_scripts.cmd
new file mode 100644
index 0000000000..217c24f5d3
--- /dev/null
+++ b/_generator/list_scripts.cmd
@@ -0,0 +1 @@
+cm doc script --output_dir=..
\ No newline at end of file
diff --git a/_generator/list_scripts_test.cmd b/_generator/list_scripts_test.cmd
new file mode 100644
index 0000000000..af6012a0be
--- /dev/null
+++ b/_generator/list_scripts_test.cmd
@@ -0,0 +1 @@
+cm doc script app-loadgen-generic-python --output_dir=..
\ No newline at end of file
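For reviewers unfamiliar with `cm create-toc-from-md`, here is a minimal, hypothetical sketch of what such a Markdown TOC generator does; this is an illustrative approximation, not the actual CM implementation, and details such as anchor rules may differ:

```python
# Hypothetical sketch of a Markdown TOC generator in the spirit of
# "cm create-toc-from-md" (illustrative only; the real CM utility may differ).
import re
import sys

FENCE = chr(96) * 3  # literal ``` marker, spelled out to keep this example self-contained

def make_toc(md_path):
    toc = []
    in_code_block = False
    with open(md_path, encoding="utf-8") as f:
        for line in f:
            if line.lstrip().startswith(FENCE):
                # Ignore "#" lines inside fenced code blocks.
                in_code_block = not in_code_block
                continue
            m = re.match(r"^(#{1,6})\s+(.+)", line)
            if m and not in_code_block:
                level, title = len(m.group(1)), m.group(2).strip()
                # GitHub-style anchor: drop punctuation, lowercase, spaces -> dashes.
                anchor = re.sub(r"[^\w\- ]", "", title).lower().replace(" ", "-")
                toc.append("  " * (level - 1) + "* [%s](#%s)" % (title, anchor))
    return "\n".join(toc)

if __name__ == "__main__":
    print(make_toc(sys.argv[1]))
```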
diff --git a/archive/taskforce-2022/index.html b/archive/taskforce-2022/index.html
new file mode 100644
index 0000000000..6ae9215444
--- /dev/null
+++ b/archive/taskforce-2022/index.html
@@ -0,0 +1,943 @@
+ [generated MkDocs page "Taskforce 2022 - Collective Mind Documentation": HTML head, theme assets and navigation omitted; the page text follows]
[ Back to index ]

MLCommons Taskforce on Education and Reproducibility

Mission

  • help you automate and validate your MLPerf inference benchmark submissions to the v3.0 round for any hardware target (deadline: March 3, 2023) - join the related Discord channel;
  • enable faster innovation while adapting to the world of rapidly evolving software, hardware, and data by encoding everyone’s knowledge in the form of portable, interoperable and customizable automation recipes reusable across the community;
  • modularize AI and ML Systems by decomposing them into the above automation recipes using the MLCommons CK2 automation meta-framework (aka CM);
  • automate benchmarking, design space exploration and optimization of AI and ML Systems across diverse software and hardware stacks;
  • help the community reproduce MLPerf benchmarks, prepare their own submissions and deploy Pareto-optimal ML/AI systems in the real world;
  • support student competitions, reproducibility initiatives and artifact evaluation at ML and Systems conferences using the rigorous MLPerf methodology and the MLCommons automation meta-framework.
Co-chairs and tech leads

Discord server

Meeting notes and news

Conf-calls

Following our successful community submission to MLPerf inference v3.0, we will set up new weekly conf-calls shortly - please stay tuned for more details!

Please add your topics for discussion in the meeting notes or via GitHub tickets.

Mailing list

Please join our mailing list here.

GUI for MLPerf inference
On-going projects

See our R&D roadmap for Q4 2022 and Q1 2023:

  • Modularize MLPerf benchmarks and make them easier to run, optimize, customize and reproduce across rapidly evolving software, hardware and data.
  • Implement and enhance cross-platform CM scripts to make MLOps and DevOps more interoperable, reusable, portable, deterministic and reproducible.
  • Lower the barrier of entry for new MLPerf submitters and reduce their associated costs.
  • Develop a universal, modular and portable benchmarking workflow that can run on any software/hardware stack from the cloud to embedded devices.
  • Automate design space exploration and optimization of the whole ML/SW/HW stack to trade off performance, accuracy, energy, size and costs.
  • Automate submission of Pareto-efficient configurations to MLPerf.
  • Help end-users of ML Systems visualize all MLPerf results, reproduce them and deploy Pareto-optimal ML/SW/HW stacks in production.
Purpose

MLCommons is a non-profit consortium of 50+ companies that was originally created to develop a common, reproducible and fair benchmarking methodology for new AI and ML hardware.

MLCommons has developed an open-source reusable module called loadgen that efficiently and fairly measures the performance of inference systems. It generates traffic for scenarios that were formulated by a diverse set of experts from MLCommons to emulate the workloads seen in mobile devices, autonomous vehicles, robotics, and cloud-based setups.

MLCommons has also prepared several reference ML tasks, models and datasets for vision, recommendation, language processing and speech recognition to let companies benchmark and compare their new hardware in terms of accuracy, latency, throughput and energy in a reproducible way twice a year.
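To make the loadgen flow concrete, here is a minimal sketch of driving it from Python with a dummy system-under-test. It assumes the `mlperf_loadgen` bindings are installed (for example via `pip install mlcommons-loadgen`), and the exact constructor signatures can vary slightly between loadgen releases:

```python
# Minimal sketch: run a loadgen performance test against a dummy SUT.
# Real harnesses plug actual inference and dataset loading into these callbacks.
import mlperf_loadgen as lg

def issue_queries(query_samples):
    # A real SUT would run inference here; we return empty responses.
    lg.QuerySamplesComplete(
        [lg.QuerySampleResponse(qs.id, 0, 0) for qs in query_samples])

def flush_queries():
    pass

def load_samples(indices):
    pass  # a real QSL loads these samples into memory

def unload_samples(indices):
    pass

settings = lg.TestSettings()
settings.scenario = lg.TestScenario.Offline
settings.mode = lg.TestMode.PerformanceOnly

sut = lg.ConstructSUT(issue_queries, flush_queries)
qsl = lg.ConstructQSL(1024, 128, load_samples, unload_samples)  # total vs in-memory samples
lg.StartTest(sut, qsl, settings)
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
```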
The first goal of this open automation and reproducibility taskforce is to develop a light-weight and open-source automation meta-framework that can make MLOps and DevOps more interoperable, reusable, portable, deterministic and reproducible.

We then use this automation meta-framework to develop plug&play workflows for the MLPerf benchmarks to make it easier for newcomers to run them across diverse hardware, software and data and to automatically plug in their own ML tasks, models, data sets, engines, libraries and tools.

Another goal is to use these portable MLPerf workflows to help students, researchers and engineers participate in crowd-benchmarking and exploration of the design space trade-offs (accuracy, latency, throughput, energy, size, etc.) of their ML Systems from the cloud to the edge using the mature MLPerf methodology, while automating the submission of their Pareto-efficient configurations to the open division of the MLPerf inference benchmark.

The final goal is to help end-users reproduce MLPerf results and deploy the most suitable ML/SW/HW stacks in production based on their requirements and constraints.
Technology

This MLCommons taskforce is developing an open-source and technology-neutral Collective Mind meta-framework (CM) to modularize ML Systems and automate their benchmarking, optimization and design space exploration across continuously changing software, hardware and data.

CM is the second generation of the MLCommons CK workflow automation framework that was originally developed to make it easier to reproduce research papers and validate them in the real world.
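For illustration, here is a small sketch of the CM Python API: the `cmind` package exposes a single `access` entry point that mirrors the `cm` command line. Treat the exact flags and returned fields as indicative rather than authoritative:

```python
# Sketch: call the CM automation framework from Python via cmind.access,
# which takes a dict mirroring the "cm <action> <automation> ..." CLI.
import cmind

# Rough equivalent of `cm search script --tags=mlperf`: find reusable CM scripts.
r = cmind.access({'action': 'search',
                  'automation': 'script',
                  'tags': 'mlperf'})
if r['return'] > 0:
    # CM convention: non-zero 'return' signals an error with a message.
    raise RuntimeError(r['error'])

for artifact in r['list']:
    print(artifact.path)
```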
As a proof-of-concept, this technology was successfully used to automate MLPerf benchmarking and submissions from Qualcomm, HPE, Dell, Lenovo, dividiti, Krai, the cTuning foundation and OctoML. For example, it was used and extended by Arjun Suresh with several other engineers to automate the record-breaking MLPerf inference benchmark submission for Qualcomm AI 100 devices.

The goal of this group is to help users automate all the steps to prepare and run MLPerf benchmarks across any ML models, data sets, frameworks, compilers and hardware using the MLCommons CM framework.
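As a concrete taste of that automation, here is a hedged example of invoking one of the portable CM scripts from Python; the tags shown are a simple, commonly documented example, and the MLPerf-specific scripts take many more options:

```python
# Sketch: run a portable CM script by tags, the Python equivalent of
# `cm run script --tags=detect,os`; MLPerf workflows chain many such scripts.
import cmind

r = cmind.access({'action': 'run',
                  'automation': 'script',
                  'tags': 'detect,os',
                  'out': 'con'})   # print the script's output to the console
if r['return'] > 0:
    raise RuntimeError(r['error'])
```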
Here is an example of current manual and error-prone MLPerf benchmark preparation steps:

[figure: manual MLPerf benchmark preparation steps]

Here is the concept of CM-based automated workflows:

[figure: CM-based automated MLPerf workflows]
We finished prototyping the new CM framework in summer 2022 based on the feedback of CK users and successfully used it to modularize MLPerf and automate the submission of benchmarking results to MLPerf inference v2.1. See this tutorial for more details.

We continue developing CM as an open-source educational toolkit to help the community learn how to modularize, crowd-benchmark, optimize and deploy Pareto-efficient ML Systems based on the mature MLPerf methodology and portable CM scripts - please check the deliverables section to keep track of our community developments and do not hesitate to join this community effort!
Agenda

See our R&D roadmap for Q4 2022 and Q1 2023.

2022

  • Prototype the new CM toolkit to modularize AI & ML systems based on the original CK concepts: DONE - GitHub.
  • Decompose the MLPerf inference benchmark into portable, reusable and plug&play CM components: DONE for image classification and object detection - GitHub.
  • Demonstrate CM-based automation to submit results to MLPerf inference: DONE - showcased the CM automation concept for the MLPerf inference v2.1 submission.
  • Prepare a CM-based MLPerf modularization and automation tutorial: DONE - link.
  • Add tests to cover critical functionality of portable CM scripts for MLPerf: DONE - link.
  • Improve CM workflow/script automation to modularize ML Systems: DONE - link.
  • Prototype CM-based modularization of the MLPerf inference benchmark with a C++ back-end and loadgen to automatically plug in different ML models, data sets, frameworks, SDKs, compilers and tools and automatically run it across different hardware and run-times: ongoing internship of Thomas Zhu from Oxford University.
  • Prototype CM-based automation for TinyMLPerf: ongoing.
  • Add a basic TVM back-end to the latest MLPerf inference repo: ongoing.
  • Convert outdated CK components for MLPerf and MLOps into the new CM format: ongoing.
  • Develop a methodology to create modular containers and MLCommons MLCubes that contain CM components to run the MLPerf inference benchmarks out of the box: ongoing.
  • Prototype CM integration with the power infrastructure (power WG) and the logging infrastructure (infra WG): TBD.
  • Process feedback from the community about CM-based modularization and crowd-benchmarking of MLPerf: TBD.
2023

  • Upload all stable CM components for MLPerf to Zenodo or any other permanent archive to ensure the stability of all CM workflows for MLPerf and modular ML Systems.
  • Develop CM automation for community crowd-benchmarking of the MLPerf benchmarks across different models, data sets, frameworks, compilers, run-times and platforms.
  • Develop a customizable dashboard to visualize and analyze all MLPerf crowd-benchmarking results based on these examples from the legacy CK prototype: 1, 2.
  • Share MLPerf benchmarking results in a database compatible with FAIR principles (mandated by the funding agencies in the USA and Europe) - ideally, eventually, the MLCommons general datastore.
  • Connect the CM-based MLPerf inference submission system with our reproducibility initiatives at ML and Systems conferences. Organize open ML/SW/HW optimization and co-design tournaments using CM and the MLPerf methodology based on our ACM ASPLOS-REQUEST'18 proof-of-concept.
  • Enable automatic submission of Pareto-efficient crowd-benchmarking results (performance/accuracy/energy/size trade-off - see this example from the legacy CK prototype) to MLPerf on behalf of MLCommons.
  • Share deployable MLPerf inference containers with Pareto-efficient ML/SW/HW stacks.
Resources

Acknowledgments

This project is supported by MLCommons, OctoML and many great contributors.
\ No newline at end of file
diff --git a/artifact-evaluation/checklist/index.html b/artifact-evaluation/checklist/index.html
new file mode 100644
index 0000000000..e6cfcfd5b2
--- /dev/null
+++ b/artifact-evaluation/checklist/index.html
@@ -0,0 +1,608 @@
+ [generated MkDocs page "Checklist - Collective Mind Documentation": HTML head, theme assets and navigation omitted; the page text follows]
Checklist

Moved to https://github.com/ctuning/artifact-evaluation/blob/master/docs/checklist.md
\ No newline at end of file
diff --git a/artifact-evaluation/faq/index.html b/artifact-evaluation/faq/index.html
new file mode 100644
index 0000000000..7877567fca
--- /dev/null
+++ b/artifact-evaluation/faq/index.html
@@ -0,0 +1,608 @@
+ [generated MkDocs page "Faq - Collective Mind Documentation": HTML head, theme assets and navigation omitted; the page text follows]
Faq

Moved to https://github.com/ctuning/artifact-evaluation/blob/master/docs/faq.md
\ No newline at end of file
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Basics__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Basics__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..7ccdb90774
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Basics__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Decisions__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Decisions__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..b98d0e2022
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Decisions__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Messages__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Messages__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..aa70b543a3
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Messages__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Review_form__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Review_form__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..620de78cb1
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Review_form__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Reviews__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Reviews__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..70846a5d66
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Reviews__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submission_form__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submission_form__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..9369b49dca
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submission_form__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submissions__PPoPP'19_AE.pdf b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submissions__PPoPP'19_AE.pdf
new file mode 100644
index 0000000000..85e2a57e9d
Binary files /dev/null and b/artifact-evaluation/hotcrp-config/HotCRP_Settings__Submissions__PPoPP'19_AE.pdf differ
diff --git a/artifact-evaluation/hotcrp-config/hotcrp-config-acm-ieee-micro-2023-ae.json b/artifact-evaluation/hotcrp-config/hotcrp-config-acm-ieee-micro-2023-ae.json
new file mode 100644
index 0000000000..5d77c3b6f6
--- /dev/null
+++ b/artifact-evaluation/hotcrp-config/hotcrp-config-acm-ieee-micro-2023-ae.json
@@ -0,0 +1,869 @@
+{
+    "accepted_author_visibility": true,
+    "acm_c_abstract": "",
+    "acm_c_acronym": "",
+    "acm_c_conf_number": 0,
+    "acm_c_dates": "",
+    "acm_c_proc_class": "",
+    "acm_c_proc_title": "",
+    "acm_confid": "",
+    "acm_proceedingfirstpage": 1,
+    "acm_proceedingtag": "",
+    "author_visibility": "open",
+    "automatic_tag": [],
+    "badge": [
+        {
+            "style": "black",
+            "tags": ""
+        },
+        {
+            "style": "red",
+            "tags": "acm:artifacts-evaluated-functional"
+        },
+        {
+            "style": "orange",
+            "tags": "acm:artifacts-evaluated-reusable"
+        },
+        {
+            "style": "yellow",
+            "tags": ""
+        },
+        {
+            "style": "green",
+            "tags": "acm:artifacts-available"
+        },
+        {
+            "style": "blue",
+            "tags": "acm:results-validated-reproduced"
+        },
+        {
+            "style": "purple",
+            "tags": ""
+        },
+        {
+            "style": "gray",
+            "tags": ""
+        },
+        {
+            "style": "white",
"tags": "" + }, + { + "style": "pink", + "tags": "" + } + ], + "comment_allow_always": false, + "comment_allow_author": true, + "comment_visibility_anonymous": false, + "conference_abbreviation": "MICRO 2023 Artifact Evaluation", + "conference_name": "Artifact Evaluation for the 56th IEEE\/ACM International Symposium on Microarchitecture", + "conference_url": "https:\/\/cTuning.org\/ae\/micro2023.html", + "conflict_description": "This includes past advisors and students, people with the same affiliation, and any recent (~2 years) coauthors and collaborators.", + "conflict_visibility": "if_authors_visible", + "decision": [ + { + "id": 1, + "name": "Artifact passed evaluation with at least one ACM badge", + "category": "accept" + }, + { + "id": -1, + "name": "Artifact did not pass evaluation", + "category": "reject" + } + ], + "decision_statistics_public": false, + "decision_visibility_author": "yes", + "decision_visibility_author_condition": "", + "decision_visibility_reviewer": "yes", + "draft_submission_early_visibility": false, + "email_default_cc": "suvinay@google.com, ksmurthy@google.com, grigori.fursin@ctuning.org", + "email_default_reply_to": "suvinay@google.com, ksmurthy@google.com, grigori.fursin@ctuning.org", + "final_done": "", + "final_edit_message": "Congratulations! The paper has been accepted. Update the paper’s final version here. {deadline} You may also edit paper contacts, allowing others to view reviews and make changes.", + "final_open": false, + "final_soft": "", + "format": [], + "home_message": "MICRO 2023 artifact evaluation website: cTuning.org\/ae\/micro2023.html<\/a>.", + "mailbody_requestreview": "", + "pc_warn_bulk_download": true, + "preference_instructions": "

A review preference is a small integer that indicates how much you want to review a submission. Positive numbers mean you want to review, negative numbers mean you don’t, and −100 means you think you have a conflict. −20 to 20 is a typical range for real preferences; multiple submissions can have the same preference. The automatic assignment algorithm attempts to assign reviews in descending preference order, using topic scores to break ties. Different users’ preference values are not compared and need not use the same scale.<\/p>\n\n

The list shows all submissions and their topics (high interest topics<\/span>, low interest topics<\/span>). “Topic score” summarizes your interest in the submission’s topics. Select a column heading to sort by that column. Enter preferences in the text boxes or on the paper pages. You may also upload preferences from a text file; see the “Download” and “Upload” links below the paper list.<\/p>", + "random_pids": true, + "response": [ + { + "id": 1, + "name": "", + "open": "", + "done": "", + "grace": 0, + "condition": "all", + "wordlimit": 500, + "truncate": false, + "instructions": "The authors’ response should address reviewer concerns and correct misunderstandings. Make it short and to the point; the conference deadline has passed. Try to stay within {wordlimit} words." + } + ], + "response_active": false, + "review": [ + { + "id": 3, + "name": "Full-Evaluation", + "soft": "", + "done": "", + "external_soft": "", + "external_done": "" + }, + { + "id": 2, + "name": "Kick-the-Tires", + "soft": "", + "done": "", + "external_soft": "", + "external_done": "" + } + ], + "review_blind": "blind", + "review_default_external_round": "", + "review_default_round": "Kick-the-Tires", + "review_identity_visibility_external": "after_review", + "review_identity_visibility_pc": false, + "review_open": false, + "review_proposal": "no", + "review_proposal_editable": "no", + "review_rating": "pc", + "review_self_assign": true, + "review_terms": "Please, use the following guidelines to review artifacts: cTuning.org\/ae\/reviewing.html<\/a>.", + "review_visibility_author": "yes", + "review_visibility_author_condition": "", + "review_visibility_author_tags": "", + "review_visibility_external": true, + "review_visibility_lead": true, + "review_visibility_pc": "assignment_complete", + "rf": [ + { + "id": "s04", + "name": "Evaluator expertise", + "order": 1, + "type": "radio", + "description": "", + "required": true, + "visibility": "au", + "condition": "all", + "values": [ + { + "id": 1, + "symbol": 1, + "name": "Some familiarity", + "order": 1 + }, + { + "id": 2, + "symbol": 2, + "name": "Knowledgeable", + "order": 2 + }, + { + "id": 3, + "symbol": 3, + "name": "Expert", + "order": 3 + } + ], + "scheme": "svr" + }, + { + "id": "s01", + "name": "Artifact publicly available?", + "order": 2, + "type": "radio", + "description": "The author-created artifacts relevant to this paper will receive an ACM \"artifact available\" badge only if<\/strong> they have been placed on a publicly accessible archival repository such as Zenodo, FigShare and Dryad. A DOI will be then assigned to their artifacts and must be provided in the Artifact Appendix!\n\n

Note: publisher repositories, institutional repositories or open commercial repositories are acceptable only if they have a declared plan to enable permanent accessibility! Personal web pages, GitHub, GitLab, BitBucket, Google Drive and DropBox are not acceptable for this purpose!<\/p>\n\n

Artifacts do not need to have been formally evaluated in order for an article to receive this badge. In addition, they need not be complete in the sense described above. They simply need to be relevant to the study and add value beyond the text in the article. Such artifacts could be something as simple as the data from which the figures are drawn, or as complex as a complete software system under study.<\/p>", + "required": true, + "visibility": "au", + "condition": "all", + "values": [ + { + "id": 1, + "symbol": 1, + "name": "Publicly available", + "order": 1 + }, + { + "id": 2, + "symbol": 2, + "name": "Not publicly available", + "order": 2 + } + ], + "scheme": "svr" + }, + { + "id": "s02", + "name": "Artifact functional?", + "order": 3, + "type": "radio", + "description": "