diff --git a/.azure-pipelines/Windows-CI.yml b/.azure-pipelines/Windows-CI.yml index d92768e1efac..4da30a485a06 100644 --- a/.azure-pipelines/Windows-CI.yml +++ b/.azure-pipelines/Windows-CI.yml @@ -1,4 +1,4 @@ -# Builds and tests ONNX MLIR +# Builds and tests ONNX-MLIR # Downloads MLIR artifact if it exists for the current LLVM commit, otherwise builds MLIR and publishes artifact trigger: diff --git a/.circleci/config.yml b/.circleci/config.yml index 1dee5ff06231..a34991de6304 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -33,7 +33,7 @@ jobs: paths: - llvm-project - run: - name: Install ONNX MLIR + name: Install ONNX-MLIR command: source onnx-mlir/utils/install-onnx-mlir.sh - run: name: Run End-To-End Tests diff --git a/CMakeLists.txt b/CMakeLists.txt index 323581a21e9e..17a8e11e0685 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,7 +5,7 @@ cmake_minimum_required(VERSION 3.13.4) project(onnx-mlir) -option(ONNX_MLIR_BUILD_TESTS "Build ONNX MLIR test executables. If OFF, just generate build targets." ON) +option(ONNX_MLIR_BUILD_TESTS "Build ONNX-MLIR test executables. If OFF, just generate build targets." ON) option(ONNX_MLIR_SUPPRESS_THIRD_PARTY_WARNINGS "Suppress warning in third_party code." ON) set(CMAKE_CXX_STANDARD 14) diff --git a/README.md b/README.md index 931bec597eb4..4a24f7ba397c 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

-# ONNX MLIR +# ONNX-MLIR The Open Neural Network Exchange implementation in MLIR (http://onnx.ai/onnx-mlir/). | System | Build Status | @@ -12,326 +12,49 @@ The Open Neural Network Exchange implementation in MLIR (http://onnx.ai/onnx-mli | amd64-Windows | [![Build Status](https://dev.azure.com/onnx-pipelines/onnx/_apis/build/status/MLIR-Windows-CI?branchName=main)](https://dev.azure.com/onnx-pipelines/onnx/_build/latest?definitionId=9&branchName=main) | | amd64-macOS | [![Build Status](https://github.com/onnx/onnx-mlir/workflows/Build%20x86%20onnx-mlir%20on%20macOS/badge.svg)](https://github.com/onnx/onnx-mlir/actions?query=workflow%3A%22Build+x86+onnx-mlir+on+macOS%22) | -## Prebuilt Containers -An easy way to get started with ONNX-MLIR is to use a prebuilt docker image. -These images are created as a result of a successful merge build on the trunk. -This means that the latest image represents the tip of the trunk. -Currently there are both Release and Debug mode images for `amd64`, `ppc64le` and `s390x` saved in Docker Hub as, respectively, [onnxmlirczar/onnx-mlir](https://hub.docker.com/r/onnxmlirczar/onnx-mlir) and [onnxmlirczar/onnx-mlir-dev](https://hub.docker.com/r/onnxmlirczar/onnx-mlir-dev). -To use one of these images either pull it directly from Docker Hub, launch a container and run an interactive bash shell in it, or use it as the base image in a dockerfile. -The onnx-mlir image just contains the built compiler and you can use it immediately to compile your model without any installation. A python convenience script is provided to allow you to run ONNX-MLIR inside a docker container as if running the ONNX-MLIR compiler directly on the host. For example, -``` -# docker/onnx-mlir.py --EmitLib mnist/model.onnx -505a5a6fb7d0: Pulling fs layer -505a5a6fb7d0: Verifying Checksum -505a5a6fb7d0: Download complete -505a5a6fb7d0: Pull complete -Shared library model.so has been compiled. 
-``` -The script will pull the onnx-mlir image if it's not available locally, mount the directory containing the `model.onnx` into the container, and compile and generate the `model.so` in the same directory. +## Setting up ONNX-MLIR using Prebuilt Containers -The onnx-mlir-dev image contains the full build tree including the prerequisites and a clone of the source code. -The source can be modified and onnx-mlir rebuilt from within the container, so it is possible to use it -as a development environment. -It is also possible to attach vscode to the running container. -An example Dockerfile useful for development and vscode configuration files can be seen in the docs folder. -If the workspace directory and the vscode files are not present in the directory where the Docker build is run, then the lines referencing them should be commented out or deleted. -The Dockerfile is shown here. +The preferred approach to using and developing ONNX-MLIR is to use Docker Images and Containers, as getting the proper code dependencies may be tricky on some systems. Our instructions on using ONNX-MLIR with dockers are [here](docs/Docker.md). -[same-as-file]: <> (docs/docker-example/Dockerfile) -``` -FROM onnxmlirczar/onnx-mlir-dev -WORKDIR /workdir -ENV HOME=/workdir - -# 1) Install packages. -ENV PATH=$PATH:/workdir/bin -RUN apt-get update -RUN apt-get install -y python-numpy -RUN apt-get install -y python3-pip -RUN python -m pip install --upgrade pip -RUN apt-get install -y gdb -RUN apt-get install -y lldb -RUN apt-get install -y emacs -RUN apt-get install -y vim -# 2) Instal optional packages, uncomment/add as you see fit. 
-# RUN apt-get install -y valgrind -# RUN apt-get install -y libeigen3-dev -# RUN apt-get install -y clang-format -# RUN python -m pip install wheel -# RUN python -m pip install numpy -# RUN python -m pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -# RUN git clone https://github.com/onnx/tutorials.git -# Install clang-12. -# RUN apt-get install -y lsb-release wget software-properties-common -# RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - -# 3) When using vscode, copy your .vscode in the Dockerfile dir and -# uncomment the two lines below. -# WORKDIR /workdir/.vscode -# ADD .vscode /workdir/.vscode - -# 4) When using a personal workspace folder, set your workspace sub-directory -# in the Dockerfile dir and uncomment the two lines below. -# WORKDIR /workdir/workspace -# ADD workspace /workdir/workspace - -# 5) Fix git by reattaching head and making git see other branches than main. -WORKDIR /workdir/onnx-mlir -RUN git checkout main -RUN git fetch --unshallow - -# 6) Set the PATH environment vars for make/debug mode. Replace Debug -# with Release in the PATH below when using Release mode. -WORKDIR /workdir -ENV MLIR_DIR=/workdir/llvm-project/build/lib/cmake/mlir -ENV NPROC=4 -ENV PATH=$PATH:/workdir/onnx-mlir/build/Debug/bin/:/workdir/onnx-mlir/build/Debug/lib:/workdir/llvm-project/build/bin -``` +## Setting up ONNX-MLIR directly -## Prerequisites +### Prerequisites + ``` gcc >= 6.4 libprotoc >= 3.11.0 cmake >= 3.15.4 ninja >= 1.10.2 ``` -GCC can be found [here](https://gcc.gnu.org/install/), or if you have [Homebrew](https://docs.brew.sh/Installation), you can use `brew install gcc`. To check what version of gcc you have installed, run `gcc --version`. - -The instructions to install libprotoc can be found [here](http://google.github.io/proto-lens/installing-protoc.htm). Or alternatively, if you have Homebrew, you can run `brew install protobuf`. 
To check what version you have installed, run `protoc --version`. - -Cmake can be found [here](https://cmake.org/download/). However, to use Cmake, you need to follow the "How to Install For Command Line Use" tutorial, which can be found in Cmake under Tools>How to Install For Command Line Use. To check which version you have, you can either look in the desktop version under CMake>About, or run `cmake --version`. - -The instructions for installing Ninja can be found [here](https://ninja-build.org/). Or, using Homebrew, you can run `brew install ninja`. To check the version, run `ninja --version`. - - - -At any point in time, ONNX MLIR depends on a specific commit of the LLVM project that has been shown to work with the project. Periodically the maintainers -need to move to a more recent LLVM level. Among other things, this requires that the commit string in utils/clone-mlir.sh be updated. A consequence of -making this change is that the TravisCI build will fail until the Docker images that contain the prereqs are rebuilt. There is a GitHub workflow that rebuilds -this image for the amd64 architecture, but currently the ppc64le and s390x images must be rebuilt manually. The Dockerfiles to accomplish that are in the repo. - -## Installation on UNIX - -#### MLIR -Firstly, install MLIR (as a part of LLVM-Project): - -[same-as-file]: <> (utils/clone-mlir.sh) -``` bash -git clone https://github.com/llvm/llvm-project.git -# Check out a specific branch that is known to work with ONNX MLIR. -cd llvm-project && git checkout 0bf230d4220660af8b2667506f8905df2f716bdf && cd .. -``` - -[same-as-file]: <> (utils/build-mlir.sh) -``` bash -mkdir llvm-project/build -cd llvm-project/build -cmake -G Ninja ../llvm \ - -DLLVM_ENABLE_PROJECTS=mlir \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_ENABLE_RTTI=ON - -cmake --build . -- ${MAKEFLAGS} -cmake --build . 
--target check-mlir -``` - -#### ONNX-MLIR (this project) -The following environment variables can be set before building onnx-mlir (or alternatively, they need to be passed as CMake variables): -- MLIR_DIR should point to the mlir cmake module inside an llvm-project build or install directory (e.g., llvm-project/build/lib/cmake/mlir). - -This project uses lit ([LLVM's Integrated Tester](http://llvm.org/docs/CommandGuide/lit.html)) for unit tests. When running CMake, we can also specify the path to the lit tool from LLVM using the LLVM_EXTERNAL_LIT define but it is not required as long as MLIR_DIR points to a build directory of llvm-project. If MLIR_DIR points to an install directory of llvm-project, LLVM_EXTERNAL_LIT is required. - -To build ONNX-MLIR, use the following commands: - -[same-as-file]: <> ({"ref": "utils/install-onnx-mlir.sh", "skip-doc": 2}) -```bash -git clone --recursive https://github.com/onnx/onnx-mlir.git - -# Export environment variables pointing to LLVM-Projects. -export MLIR_DIR=$(pwd)/llvm-project/build/lib/cmake/mlir - -mkdir onnx-mlir/build && cd onnx-mlir/build -if [[ -z "$pythonLocation" ]]; then - cmake -G Ninja -DCMAKE_CXX_COMPILER=/usr/bin/c++ .. -else - cmake -G Ninja -DCMAKE_CXX_COMPILER=/usr/bin/c++ -DPython3_ROOT_DIR=$pythonLocation .. -fi -cmake --build . - -# Run lit tests: -export LIT_OPTS=-v -cmake --build . --target check-onnx-lit -``` - -If you are running on OSX Big Sur, you need to add `-DCMAKE_CXX_COMPILER=/usr/bin/c++` -to the `cmake ..` command due to changes in the compilers. The environment variable -`$pythonLocation` may be used to specify the base directory of the Python compiler. -After the above commands succeed, an `onnx-mlir` executable should appear in the `bin` directory. - -##### LLVM and ONNX-MLIR CMake variables - -The following CMake variables from LLVM and ONNX MLIR can be used when compiling ONNX MLIR. 
- -**MLIR_DIR**:PATH - Path to to the mlir cmake module inside an llvm-project build or install directory (e.g., c:/repos/llvm-project/build/lib/cmake/mlir). - This is required if **MLIR_DIR** is not specified as an environment variable. - -**LLVM_EXTERNAL_LIT**:PATH - Path to the lit tool. Defaults to an empty string and LLVM will find the tool based on **MLIR_DIR** if possible. - This is required when **MLIR_DIR** points to an install directory. - -### MacOS Issues - -There is a known issue when building onnx-mlir. If you see a error of this sorts -``` shell -Cloning into '/home/chentong/onnx-mlir/build/src/Runtime/jni/jsoniter'... - -[...] - -make[2]: *** [src/Runtime/jni/CMakeFiles/jsoniter.dir/build.make:74: src/Runtime/jni/jsoniter/target/jsoniter-0.9.23.jar] Error 127 -make[1]: *** [CMakeFiles/Makefile2:3349: src/Runtime/jni/CMakeFiles/jsoniter.dir/all] Error 2 -make: *** [Makefile:146: all] Error 2 -``` - -The suggested workaround before it's fixed: `brew install maven` and run `alias nproc="sysctl -n hw.logicalcpu"` in your shell. - -## Installation on Windows -Building onnx-mlir on Windows requires building some additional prerequisites that are not available by default. - -Note that the instructions in this file assume you are using [Visual Studio 2019 Community Edition](https://visualstudio.microsoft.com/downloads/) with ninja. It is recommended that you have the **Desktop development with C++** and **Linux development with C++** workloads installed. This ensures you have all toolchains and libraries needed to compile this project and its dependencies on Windows. - -Run all the commands from a shell started from **"Developer Command Prompt for VS 2019"**. - -#### Protobuf -Build protobuf as a static library. - -[same-as-file]: <> (utils/install-protobuf.cmd) -```shell -git clone --recurse-submodules https://github.com/protocolbuffers/protobuf.git -REM Check out a specific branch that is known to work with ONNX MLIR. 
-REM This corresponds to the v3.11.4 tag -cd protobuf && git checkout d0bfd5221182da1a7cc280f3337b5e41a89539cf && cd .. - -set root_dir=%cd% -md protobuf_build -cd protobuf_build -call cmake %root_dir%\protobuf\cmake -G "Ninja" ^ - -DCMAKE_INSTALL_PREFIX="%root_dir%\protobuf_install" ^ - -DCMAKE_BUILD_TYPE=Release ^ - -Dprotobuf_BUILD_EXAMPLES=OFF ^ - -Dprotobuf_BUILD_SHARED_LIBS=OFF ^ - -Dprotobuf_BUILD_TESTS=OFF ^ - -Dprotobuf_MSVC_STATIC_RUNTIME=OFF ^ - -Dprotobuf_WITH_ZLIB=OFF - -call cmake --build . --config Release -call cmake --build . --config Release --target install -``` - -Before running CMake for onnx-mlir, ensure that the bin directory to this protobuf is before any others in your PATH: -```shell -set PATH=%root_dir%\protobuf_install\bin;%PATH% -``` - -#### MLIR -Install MLIR (as a part of LLVM-Project): - -[same-as-file]: <> (utils/clone-mlir.sh) -```shell -git clone https://github.com/llvm/llvm-project.git -# Check out a specific branch that is known to work with ONNX MLIR. -cd llvm-project && git checkout 0bf230d4220660af8b2667506f8905df2f716bdf && cd .. -``` - -[same-as-file]: <> (utils/build-mlir.cmd) -```shell -set root_dir=%cd% -md llvm-project\build -cd llvm-project\build -call cmake %root_dir%\llvm-project\llvm -G "Ninja" ^ - -DCMAKE_INSTALL_PREFIX="%root_dir%\llvm-project\build\install" ^ - -DLLVM_ENABLE_PROJECTS=mlir ^ - -DLLVM_TARGETS_TO_BUILD="host" ^ - -DCMAKE_BUILD_TYPE=Release ^ - -DLLVM_ENABLE_ASSERTIONS=ON ^ - -DLLVM_ENABLE_RTTI=ON ^ - -DLLVM_ENABLE_ZLIB=OFF ^ - -DLLVM_INSTALL_UTILS=ON - -call cmake --build . --config Release -call cmake --build . --config Release --target install -call cmake --build . 
--config Release --target check-mlir -``` - -#### ONNX-MLIR (this project) -The following environment variables can be set before building onnx-mlir (or alternatively, they need to be passed as CMake variables): -- MLIR_DIR should point to the mlir cmake module inside an llvm-project build or install directory (e.g., c:/repos/llvm-project/build/lib/cmake/mlir). - -This project uses lit ([LLVM's Integrated Tester](http://llvm.org/docs/CommandGuide/lit.html)) for unit tests. When running CMake, we can specify the path to the lit tool from LLVM using the LLVM_EXTERNAL_LIT define, as in the example below. If MLIR_DIR points to an install directory of llvm-project, LLVM_EXTERNAL_LIT is required and %lit_path% should point to a valid lit. It is not required if MLIR_DIR points to a build directory of llvm-project, which will contain lit. -To build ONNX MLIR, use the following commands: +Help to update the prerequisites is found [here](docs/Prerequisite.md). -[same-as-file]: <> ({"ref": "utils/build-onnx-mlir.cmd", "skip-doc": 2}) -```shell -git clone --recursive https://github.com/onnx/onnx-mlir.git - -set root_dir=%cd% +At any point in time, ONNX-MLIR depends on a specific commit of the LLVM project that has been shown to work with the project. +Periodically the maintainers need to move to a more recent LLVM level. +Among other things, this requires to update the commit string in (utils/clone-mlir.sh). +When updating ONNX-MLIR, it is good practice to check that the commit string of the MLIR/LLVM is the same as the one listed in that file. -md onnx-mlir\build -cd onnx-mlir\build -call cmake %root_dir%\onnx-mlir -G "Ninja" ^ - -DCMAKE_BUILD_TYPE=Release ^ - -DCMAKE_PREFIX_PATH=%root_dir%\protobuf_install ^ - -DLLVM_EXTERNAL_LIT=%lit_path% ^ - -DLLVM_LIT_ARGS=-v ^ - -DMLIR_DIR=%root_dir%\llvm-project\build\lib\cmake\mlir +### Build on Linux or OSX -call cmake --build . 
--config Release --target onnx-mlir -``` +Directions to install MLIR and ONNX-MLIR are provided [here](docs/BuildOnLinuxOSX.md). -To run the lit ONNX MLIR tests, use the following command: +### Build on Windows -[same-as-file]: <> ({"ref": "utils/check-onnx-mlir.cmd", "skip-ref": 1}) -```shell -call cmake --build . --config Release --target check-onnx-lit -``` +Directions to install Protobuf, MLIR, and ONNX-MLIR are provided [here](docs/BuildOnWindows.md). -To run the numerical ONNX MLIR tests, use the following command: +### Testing build and summary of custom envrionment variables -[same-as-file]: <> ({"ref": "utils/check-onnx-numerical.cmd", "skip-ref": 1}) -```shell -call cmake --build . --config Release --target check-onnx-numerical -``` - -To run the doc ONNX MLIR tests, use the following command after installing third_party ONNX: - -[same-as-file]: <> ({"ref": "utils/check-docs.cmd", "skip-ref": 1}) -```shell -call cmake --build . --config Release --target check-docs -``` +After installation, an `onnx-mlir` executable should appear in the `build/Debug/bin` or `build/Release/bin` directory. -After the above commands succeed, an `onnx-mlir` executable should appear in the `bin` directory. - -##### LLVM and ONNX-MLIR CMake variables - -The following CMake variables from LLVM and ONNX MLIR can be used when compiling ONNX MLIR. - -**MLIR_DIR**:PATH - Path to to the mlir cmake module inside an llvm-project build or install directory (e.g., c:/repos/llvm-project/build/lib/cmake/mlir). - This is required if **MLIR_DIR** is not specified as an environment variable. - -**LLVM_EXTERNAL_LIT**:PATH - Path to the lit tool. Defaults to an empty string and LLVM will find the tool based on **MLIR_DIR** if possible. - This is required when **MLIR_DIR** points to an install directory. +There are several cmake targets that are used to verify the validity of the `onnx-mlir` compiler, which are listed [here](docs/TestingHighLevel.md). 
## Using ONNX-MLIR The usage of `onnx-mlir` is as such: + ``` -OVERVIEW: ONNX MLIR modular optimizer driver +OVERVIEW: ONNX-MLIR modular optimizer driver USAGE: onnx-mlir [options] @@ -343,20 +66,32 @@ Generic Options: --help-list - Display list of available options (--help-list-hidden for more) --version - Display the version of this program -ONNX MLIR Options: +ONNX-MLIR Options: These are frontend options. Choose target to emit: --EmitONNXBasic - Ingest ONNX and emit the basic ONNX operations without inferred shapes. - --EmitONNXIR - Ingest ONNX and emit corresponding ONNX dialect. - --EmitMLIR - Lower model to MLIR built-in transformation dialect. - --EmitLLVMIR - Lower model to LLVM IR (LLVM dialect). - --EmitLib - Lower model to LLVM IR, emit (to file) LLVM bitcode for model, compile and link it to a shared library. + --EmitONNXIR - Ingest ONNX and emit corresponding ONNX dialect. + --EmitMLIR - Lower input to MLIR built-in transformation dialect. + --EmitLLVMIR - Lower input to LLVM IR (LLVM MLIR dialect). + --EmitLib - Lower input to LLVM IR, emit LLVM bitcode, + compile and link it to a shared library (default). + --EmitJNI - Lower input to LLVM IR -> LLVM bitcode -> JNI shared library -> + jar. + + Optimization levels: + --O0 - Optimization level 0 (default). + --O1 - Optimization level 1. + --O2 - Optimization level 2. + --O3 - Optimization level 3. ``` -## Simple Example +The full list of options is given by the `--help` option. Note that just as most compilers, the default optimization level is `-O0`. +We recommend using `-O3` for most applications. 
-For example, to lower an ONNX model (e.g., add.onnx) to ONNX dialect, use the following command: +### Simple Example + +For example, use the following command to lower an ONNX model (e.g., add.onnx) to ONNX dialect: ```shell ./onnx-mlir --EmitONNXIR add.onnx ``` @@ -372,43 +107,10 @@ module { An example based on the add operation is found [here](docs/doc_example), which build an ONNX model using a python script, and then provide a main program to load the model's value, compute, and print the models output. -## End to end example +### End to End Example An end to end example is provided [here](docs/mnist_example/README.md), which train, compile, and execute a simple MNIST example using both the C++ or Python interface. - -## Troubleshooting - -If the latest LLVM project fails to work due to the latest changes to the MLIR subproject please consider using a slightly older version of LLVM. One such version, which we use, can be found [here](https://github.com/clang-ykt/llvm-project). - -## Installing `third_party ONNX` for Backend Tests or Rebuilding ONNX Operations - -Backend tests are triggered by `make check-onnx-backend` in the build directory and require a few preliminary steps to run successfully. Similarily, rebuilding the ONNX operations in ONNX-MLIR from their ONNX descriptions is triggered by `make OMONNXOpsIncTranslation`. - -You will need to install python 3.x if its not default in your environment, and possibly set the cmake `PYTHON_EXECUTABLE` varialbe in your top cmake file. - -You will also need `pybind11` which may need to be installed (mac: `brew install pybind11` for example) and you may need to indicate where to find the software (Mac, POWER, possibly other platforms: `export pybind11_DIR=`). Then install the `third_party/onnx` software (Mac: `pip install -e third_party/onnx`) typed in the top directory. - -On Macs/POWER and possibly other platforms, there is currently an issue that arises when installing ONNX. 
If you get an error during the build, try a fix where you edit the top CMakefile as reported in this PR: `https://github.com/onnx/onnx/pull/2482/files`. - -While running `make check-onnx-backend` on a Mac you might encouter the following error: - -```shell -Fatal Python error: Aborted - -Current thread 0x0000000107919e00 (most recent call first): - File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 2632 in getproxies_macosx_sysconf - File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 2650 in getproxies - File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 795 in __init__ - ... - ``` - - A known workaround is to export the `no_proxy` environment variable in your shell as follow, and rerun the tests. - - ```shell - % export no_proxy="*" - ``` - ## Slack channel We have a slack channel established under the Linux Foundation AI and Data Workspace, named `#onnx-mlir-discussion`. This channel can be used for asking quick questions related to this project. A direct link is [here](https://lfaifoundation.slack.com/archives/C01J4NAL4A2). diff --git a/docs/BuildONNX.md b/docs/BuildONNX.md new file mode 100644 index 000000000000..7769fb51bc81 --- /dev/null +++ b/docs/BuildONNX.md @@ -0,0 +1,31 @@ + + +# Installing `third_party ONNX` for Backend Tests or Rebuilding ONNX Operations + +Backend tests are triggered by `make check-onnx-backend` in the build directory and require a few preliminary steps to run successfully. Similarily, rebuilding the ONNX operations in ONNX-MLIR from their ONNX descriptions is triggered by `make OMONNXOpsIncTranslation`. + +You will need to install python 3.x if its not default in your environment, and possibly set the cmake `PYTHON_EXECUTABLE` varialbe in your top cmake file. 
+ +You will also need `pybind11` which may need to be installed (mac: `brew install pybind11` for example) and you may need to indicate where to find the software (Mac, POWER, possibly other platforms: `export pybind11_DIR=`). Then install the `third_party/onnx` software (Mac: `pip install -e third_party/onnx`) typed in the top directory. + +## Known issues + +On Macs/POWER and possibly other platforms, there is currently an issue that arises when installing ONNX. If you get an error during the build, try a fix where you edit the top CMakefile as reported in this PR: `https://github.com/onnx/onnx/pull/2482/files`. + +While running `make check-onnx-backend` on a Mac you might encounter the following error: + +```shell +Fatal Python error: Aborted + +Current thread 0x0000000107919e00 (most recent call first): + File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 2632 in getproxies_macosx_sysconf + File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 2650 in getproxies + File "/usr/local/Cellar/python@3.9/3.9.7/Frameworks/Python.framework/Versions/3.9/lib/python3.9/urllib/request.py", line 795 in __init__ + ... + ``` + + A known workaround is to export the `no_proxy` environment variable in your shell as follows, and rerun the tests. + + ```shell + % export no_proxy="*" + ``` \ No newline at end of file diff --git a/docs/BuildOnLinuxOSX.md b/docs/BuildOnLinuxOSX.md new file mode 100644 index 000000000000..187364c87cb5 --- /dev/null +++ b/docs/BuildOnLinuxOSX.md @@ -0,0 +1,86 @@ + + +# Installation of ONNX-MLIR on Linux / OSX + +We provide here directions to install ONNX-MLIR on Linux and OSX. +On Mac, there are a couple of commands that are different. +These differences will be listed in the explanation below, when relevant. 
+ +## MLIR + +Firstly, install MLIR (as a part of LLVM-Project): + +[same-as-file]: <> (utils/clone-mlir.sh) +``` bash +git clone https://github.com/llvm/llvm-project.git +# Check out a specific branch that is known to work with ONNX-MLIR. +cd llvm-project && git checkout 0bf230d4220660af8b2667506f8905df2f716bdf && cd .. +``` + +[same-as-file]: <> (utils/build-mlir.sh) +``` bash +mkdir llvm-project/build +cd llvm-project/build +cmake -G Ninja ../llvm \ + -DLLVM_ENABLE_PROJECTS=mlir \ + -DLLVM_TARGETS_TO_BUILD="host" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_ENABLE_ASSERTIONS=ON \ + -DLLVM_ENABLE_RTTI=ON + +cmake --build . -- ${MAKEFLAGS} +cmake --build . --target check-mlir +``` + +## ONNX-MLIR (this project) + +### Build + +The following environment variables can be set before building onnx-mlir (or alternatively, they need to be passed as CMake variables): +- `MLIR_DIR` should point to the mlir cmake module inside an llvm-project build or install directory (e.g., llvm-project/build/lib/cmake/mlir). + +This project uses lit ([LLVM's Integrated Tester](http://llvm.org/docs/CommandGuide/lit.html)) for unit tests. When running CMake, we can also specify the path to the lit tool from LLVM using the LLVM_EXTERNAL_LIT define but it is not required as long as MLIR_DIR points to a build directory of llvm-project. If MLIR_DIR points to an install directory of llvm-project, LLVM_EXTERNAL_LIT is required. + +To build ONNX-MLIR, use the following commands: + +[same-as-file]: <> ({"ref": "utils/install-onnx-mlir.sh", "skip-doc": 2}) +```bash +git clone --recursive https://github.com/onnx/onnx-mlir.git + +# Export environment variables pointing to LLVM-Projects. +export MLIR_DIR=$(pwd)/llvm-project/build/lib/cmake/mlir + +mkdir onnx-mlir/build && cd onnx-mlir/build +if [[ -z "$pythonLocation" ]]; then + cmake -G Ninja -DCMAKE_CXX_COMPILER=/usr/bin/c++ .. +else + cmake -G Ninja -DCMAKE_CXX_COMPILER=/usr/bin/c++ -DPython3_ROOT_DIR=$pythonLocation .. +fi +cmake --build . 
+ +# Run lit tests: +export LIT_OPTS=-v +cmake --build . --target check-onnx-lit +``` + +Since OSX Big Sur, add the `-DCMAKE_CXX_COMPILER=/usr/bin/c++` option to the above `cmake ..` command due to changes in default compilers. + +The environment variable `$pythonLocation` may be used to specify the base directory of the Python compiler. + +After the above commands succeed, an `onnx-mlir` executable should appear in the `Debug/bin` or `Release/bin` directory. + +### Known MacOS Issues + +There is a known issue when building onnx-mlir. If you see an error of this sort: + +``` shell +Cloning into '/home/user/onnx-mlir/build/src/Runtime/jni/jsoniter'... + +[...] + +make[2]: *** [src/Runtime/jni/CMakeFiles/jsoniter.dir/build.make:74: src/Runtime/jni/jsoniter/target/jsoniter-0.9.23.jar] Error 127 +make[1]: *** [CMakeFiles/Makefile2:3349: src/Runtime/jni/CMakeFiles/jsoniter.dir/all] Error 2 +make: *** [Makefile:146: all] Error 2. +``` + +The suggested workaround until jsoniter is fixed is as follows: install maven (e.g. `brew install maven`) and run `alias nproc="sysctl -n hw.logicalcpu"` in your shell. diff --git a/docs/BuildOnWindows.md b/docs/BuildOnWindows.md new file mode 100644 index 000000000000..8f9b07080509 --- /dev/null +++ b/docs/BuildOnWindows.md @@ -0,0 +1,101 @@ + + +# Installation of ONNX-MLIR on Windows + +Building onnx-mlir on Windows requires building some additional prerequisites that are not available by default. + +Note that the instructions in this file assume you are using [Visual Studio 2019 Community Edition](https://visualstudio.microsoft.com/downloads/) with ninja. +It is recommended that you have the **Desktop development with C++** and **Linux development with C++** workloads installed. +This ensures you have all toolchains and libraries needed to compile this project and its dependencies on Windows. + +Run all the commands from a shell started from **"Developer Command Prompt for VS 2019"**. 
+ +## Protobuf +Build protobuf as a static library. + +[same-as-file]: <> (utils/install-protobuf.cmd) +```shell +git clone --recurse-submodules https://github.com/protocolbuffers/protobuf.git +REM Check out a specific branch that is known to work with ONNX-MLIR. +REM This corresponds to the v3.11.4 tag +cd protobuf && git checkout d0bfd5221182da1a7cc280f3337b5e41a89539cf && cd .. + +set root_dir=%cd% +md protobuf_build +cd protobuf_build +call cmake %root_dir%\protobuf\cmake -G "Ninja" ^ + -DCMAKE_INSTALL_PREFIX="%root_dir%\protobuf_install" ^ + -DCMAKE_BUILD_TYPE=Release ^ + -Dprotobuf_BUILD_EXAMPLES=OFF ^ + -Dprotobuf_BUILD_SHARED_LIBS=OFF ^ + -Dprotobuf_BUILD_TESTS=OFF ^ + -Dprotobuf_MSVC_STATIC_RUNTIME=OFF ^ + -Dprotobuf_WITH_ZLIB=OFF + +call cmake --build . --config Release +call cmake --build . --config Release --target install +``` + +Before running CMake for onnx-mlir, ensure that the bin directory to this protobuf is before any others in your PATH: +```shell +set PATH=%root_dir%\protobuf_install\bin;%PATH% +``` + +#### MLIR +Install MLIR (as a part of LLVM-Project): + +[same-as-file]: <> (utils/clone-mlir.sh) +```shell +git clone https://github.com/llvm/llvm-project.git +# Check out a specific branch that is known to work with ONNX-MLIR. +cd llvm-project && git checkout 0bf230d4220660af8b2667506f8905df2f716bdf && cd .. +``` + +[same-as-file]: <> (utils/build-mlir.cmd) +```shell +set root_dir=%cd% +md llvm-project\build +cd llvm-project\build +call cmake %root_dir%\llvm-project\llvm -G "Ninja" ^ + -DCMAKE_INSTALL_PREFIX="%root_dir%\llvm-project\build\install" ^ + -DLLVM_ENABLE_PROJECTS=mlir ^ + -DLLVM_TARGETS_TO_BUILD="host" ^ + -DCMAKE_BUILD_TYPE=Release ^ + -DLLVM_ENABLE_ASSERTIONS=ON ^ + -DLLVM_ENABLE_RTTI=ON ^ + -DLLVM_ENABLE_ZLIB=OFF ^ + -DLLVM_INSTALL_UTILS=ON + +call cmake --build . --config Release +call cmake --build . --config Release --target install +call cmake --build . 
--config Release --target check-mlir +``` + +## ONNX-MLIR (this project) + +### Build +The following environment variables can be set before building onnx-mlir (or alternatively, they need to be passed as CMake variables): +- MLIR_DIR should point to the mlir cmake module inside an llvm-project build or install directory (e.g., c:/repos/llvm-project/build/lib/cmake/mlir). + +This project uses lit ([LLVM's Integrated Tester](http://llvm.org/docs/CommandGuide/lit.html)) for unit tests. When running CMake, we can specify the path to the lit tool from LLVM using the LLVM_EXTERNAL_LIT define, as in the example below. If MLIR_DIR points to an install directory of llvm-project, LLVM_EXTERNAL_LIT is required and %lit_path% should point to a valid lit. It is not required if MLIR_DIR points to a build directory of llvm-project, which will contain lit. + +To build ONNX-MLIR, use the following commands: + +[same-as-file]: <> ({"ref": "utils/build-onnx-mlir.cmd", "skip-doc": 2}) +```shell +git clone --recursive https://github.com/onnx/onnx-mlir.git + +set root_dir=%cd% + +md onnx-mlir\build +cd onnx-mlir\build +call cmake %root_dir%\onnx-mlir -G "Ninja" ^ + -DCMAKE_BUILD_TYPE=Release ^ + -DCMAKE_PREFIX_PATH=%root_dir%\protobuf_install ^ + -DLLVM_EXTERNAL_LIT=%lit_path% ^ + -DLLVM_LIT_ARGS=-v ^ + -DMLIR_DIR=%root_dir%\llvm-project\build\lib\cmake\mlir + +call cmake --build . --config Release --target onnx-mlir +``` +After the above commands succeed, an `onnx-mlir` executable should appear in the `Debug/bin` or `Release/bin` directory. \ No newline at end of file diff --git a/docs/Docker.md b/docs/Docker.md new file mode 100644 index 000000000000..ffa4f515c55c --- /dev/null +++ b/docs/Docker.md @@ -0,0 +1,149 @@ + + +# Building and Developing ONNX-MLIR using Docker + +## Prebuilt Containers + +An easy way to get started with ONNX-MLIR is to use a prebuilt docker image. +These images are created as a result of a successful merge build on the trunk. 
+This means that the latest image represents the tip of the trunk.
+Currently there are both Release and Debug mode images for `amd64`, `ppc64le` and `s390x` saved in Docker Hub as, respectively, [onnxmlirczar/onnx-mlir](https://hub.docker.com/r/onnxmlirczar/onnx-mlir) and [onnxmlirczar/onnx-mlir-dev](https://hub.docker.com/r/onnxmlirczar/onnx-mlir-dev).
+To use one of these images either pull it directly from Docker Hub, launch a container and run an interactive bash shell in it, or use it as the base image in a dockerfile.
+The onnx-mlir image just contains the built compiler and you can use it immediately to compile your model without any installation.
+
+## Easy Script to Compile a Model
+
+A python convenience script is provided to allow you to run ONNX-MLIR inside a docker container as if running the ONNX-MLIR compiler directly on the host.
+The resulting output is a Linux ELF library implementing the ONNX model.
+The `onnx-mlir.py` script is located in the [docker](../docker) directory. For example, compiling an mnist model can be done as follows.
+```
+# docker/onnx-mlir.py -O3 --EmitLib mnist/model.onnx
+505a5a6fb7d0: Pulling fs layer
+505a5a6fb7d0: Verifying Checksum
+505a5a6fb7d0: Download complete
+505a5a6fb7d0: Pull complete
+Shared library model.so has been compiled.
+```
+
+The script will pull the onnx-mlir image if it's not available locally, mount the directory containing the `model.onnx` into the container, and compile and generate the `model.so` in the same directory.
+
+This script takes the same options as the normal `onnx-mlir` command used to compile an ONNX model. Typical options are `-O0` (default) or `-O3` to define an optimization level and `--EmitLib` (default) or `--EmitJNI` to generate a dynamic library or a jar file.
+A complete list of options is provided by using the traditional `--help` option.
+
+This script generates code that can be executed on a Linux system or within a Docker container.
+ +## Building ONNX-MLIR in a docker environment + +The onnx-mlir-dev image contains the full build tree including the prerequisites and a clone of the source code. +The source can be modified and `onnx-mlir` can be rebuilt from within the container, so it is possible to use it as a development environment. +New pull requests can be generated, and the repository can be updated to the latest using git commands. +It is also possible to attach vscode to the running container. +An example Dockerfile useful for development and vscode configuration files can be seen in the [docs/docker-example](docker-example) folder. +If the workspace directory and the vscode files are not present in the directory where the Docker build is run, then the lines referencing them should be commented out or deleted. + +The Dockerfile is shown here, and should be modified according to one's need. The file below includes debugging tools as well as pytorch, which can be used to train the mnist model in our end-to-end example provided in the [docs/mnist_example](mnist_example) directory. + +[same-as-file]: <> (docs/docker-example/Dockerfile) +``` +FROM onnxmlirczar/onnx-mlir-dev +WORKDIR /workdir +ENV HOME=/workdir + +# 1) Install packages. +ENV PATH=$PATH:/workdir/bin +RUN apt-get update +RUN apt-get install -y python-numpy +RUN apt-get install -y python3-pip +RUN python -m pip install --upgrade pip +RUN apt-get install -y gdb +RUN apt-get install -y lldb + +# 2) Instal optional packages, comment/uncomment/add as you see fit. 
+RUN apt-get install -y vim +RUN apt-get install -y emacs +RUN apt-get install -y valgrind +RUN apt-get install -y libeigen3-dev +RUN apt-get install -y clang-format +RUN python -m pip install wheel +RUN python -m pip install numpy +RUN python -m pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html +RUN git clone https://github.com/onnx/tutorials.git +# Install clang +RUN apt-get install -y lsb-release wget software-properties-common +RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" + +# 3) When using vscode, copy your .vscode in the Dockerfile dir and +# uncomment the two lines below. +# WORKDIR /workdir/.vscode +# ADD .vscode /workdir/.vscode + +# 4) When using a personal workspace folder, set your workspace sub-directory +# in the Dockerfile dir and uncomment the two lines below. +# WORKDIR /workdir/workspace +# ADD workspace /workdir/workspace + +# 5) Fix git by reattaching head and making git see other branches than main. +WORKDIR /workdir/onnx-mlir +RUN git remote rename origin upstream +RUN git checkout main +RUN git fetch --unshallow +# Add optional personal fork and disable pushing to upstream (best practice). +# RUN git remote add origin https://github.com/<>/onnx-mlir.git +# RUN git remote set-url --push upstream no_push + +# 6) Set the PATH environment vars for make/debug mode. Replace Debug +# with Release in the PATH below when using Release mode. +WORKDIR /workdir +ENV MLIR_DIR=/workdir/llvm-project/build/lib/cmake/mlir +ENV NPROC=4 +ENV PATH=$PATH:/workdir/onnx-mlir/build/Debug/bin/:/workdir/onnx-mlir/build/Debug/lib:/workdir/llvm-project/build/bin +``` + +### Developping with Docker in VSCode + +The first step is to copy the [docs/ocker-example](docker-example) directory to another directory outside of the repo, say `~/DockerOnnxMlir`. + +Then, the `Dockerfile` in the copied directory should then be modified to suit one's need. 
In particular, we recommend that developers use their own fork for development. Uncomment the lines associated with git (Step 5 in the file) and substitute the appropriate GitHub Id in the commented out directives.
+The lines associated with VSCode (Step 3 in the file) should also be uncommented when using VSCode.
+Finally, we recommend creating a subdirectory named `workspace` that contains test examples you would like to have in your Docker Image and Container.
+If so, uncomment the lines associated with copying a personal workspace folder (Step 4 in the file), and that subdirectory's content will be copied over to the Docker Image.
+
+The next step is to create a Docker image. This step can be performed using the `docker build --tag imageName .` shell command. Once this command is successful, we must start a container. This can be done by a command line (e.g. `docker run -it imageName`) or by opening the Docker Dashboard, locating the Image Tab, and clicking the `run` button associated with the image just created (e.g. `imageName` above).
+
+These steps are summarized here.
+``` shell
+# Starting in the onnx-mlir directory, copy the Docker example directory.
+cp -prf docs/docker-example ~/DockerOnnxMlir
+cd ~/DockerOnnxMlir
+# Edit the Docker file.
+vi Dockerfile
+# Build the Docker image.
+docker build --tag onnx-mlir-dev .
+# Start a container using the Docker dashboard or a docker run command.
+```
+
+The next step is to open VSCode, load the Docker Extension if not already present, and then open the docker tab on the left pane. Locate the container that was just started in the previous step, right click on it, and select the `Attach Visual Studio Code` option.
+This will open a new VSCode window. Open a local folder on the `workdir` directory; this will give you access to all of the ONNX/MLIR/LLVM code as well as the `workspace` subdirectory.
+
+You may then open a shell, go to the `onnx-mlir` subdirectory, and check that all of the git is properly set up.
+ +If you opted to add your own fork, it will be listed under `origin` with `upstream` being the official ONNX-MLIR repo. For example: +``` shell +git remote -v +#origin https://github.com/AlexandreEichenberger/onnx-mlir.git (fetch) +#origin https://github.com/AlexandreEichenberger/onnx-mlir.git (push) +#upstream https://github.com/onnx/onnx-mlir.git (fetch) +#upstream no_push (push) +``` + +Now, you may fetch your own branches using `git fetch origin`, and switch to one of your branch (say `my-opt`) using the `git checkout --track origin/my-opt` command. The `--track` option is recommended as `upstream` was cloned and `origin` was added as remote. Once you want to push your changes, you should use `git push -u origin my-opt`, using the `-u` option to link the local branch with the `origin` remote repo. + +The `main` branch will default to the upstream repo. If you prefer it to be associated with your own fork's `main` branch, you may update your main branch to the latest and associate the local main branch with `origin` using the commands listed below. +``` shell +git checkout main +git branch --unset-upstream +git push --set-upstream origin main +``` + +A docker container can be used to investigate a bug, or to develop a new feature. Some like to create a new images for each new version of ONNX-MLIR; others prefer to create one image and use git to update the main branch and use git to switch between multiple branches. Both are valid approaches. + diff --git a/docs/ImportONNXDefs.md b/docs/ImportONNXDefs.md index d72d86aa925a..d01fd469871a 100644 --- a/docs/ImportONNXDefs.md +++ b/docs/ImportONNXDefs.md @@ -5,7 +5,7 @@ ONNX specifications are defined under `onnx/defs` directory in the ONNX project repository. There is a python script [utils/gen_onnx_mlir.py](../utils/gen_onnx_mlir.py) that automatically generates documents about operations in ONNX (docs/Operations.md). ONNX-MLIR modified this script to import ONNX specifications into ONNX-MLIR. 
-There are two files generated for ONNX MLIR with the modified gen_onnx_mlir.py: +There are two files generated for ONNX-MLIR with the modified gen_onnx_mlir.py: 1. `src/Dialect/ONNX/ONNXOps.td.inc`: Operation definition for MLIR TableGen. `src/Dialect/ONNX/ONNXOps.td` includes this file. 2. `src/Builder/OpBuildTable.inc`: C++ code for ONNX-MLIR frontend to import operation nodes from ONNX model. `src/Builder/FrontendDialectTransformer.cpp` includes this file. diff --git a/docs/Options.md b/docs/Options.md index ae7854c710ba..61d451789fb3 100644 --- a/docs/Options.md +++ b/docs/Options.md @@ -1,11 +1,11 @@ -# Define and Use Command-line Options for ONNX MLIR +# Define and Use Command-line Options for ONNX-MLIR -Command-line options can be used to alter the default behavior of onnx-mlir, or onnx-mlir-opt, and help user experimenting, debugging or performance tuning. We implemented command-line in ONNX MLIR based on the command-line utility provided by LLVM. We did not define `Option` or `ListOption` with MLIR pass classes(see discussion). +Command-line options can be used to alter the default behavior of onnx-mlir, or onnx-mlir-opt, and help user experimenting, debugging or performance tuning. We implemented command-line in ONNX-MLIR based on the command-line utility provided by LLVM. We did not define `Option` or `ListOption` with MLIR pass classes(see discussion). ## Organize Options -Refer [llvm document](https://llvm.org/docs/CommandLine.html) for basic idea of how to define an option. In ONNX MLIR, options are put into groups (`llvm::cl::OptionCategory`). +Refer [llvm document](https://llvm.org/docs/CommandLine.html) for basic idea of how to define an option. In ONNX-MLIR, options are put into groups (`llvm::cl::OptionCategory`). One group of options are only used by onnx-mlir to configure its input or output. These options are defined in src/main.cpp and src/Compiler/CompilerUtils.cpp within OnnxMlirOptions category. 
The rest of options may be used by both onnx-mlir and onnx-mlir-opt to control the behavior of a pass or passes. So far, only one group is defined as an example. diff --git a/docs/Prerequisite.md b/docs/Prerequisite.md new file mode 100644 index 000000000000..42e8e77bd80b --- /dev/null +++ b/docs/Prerequisite.md @@ -0,0 +1,20 @@ + + +# Getting the prerequisite software + + +``` +gcc >= 6.4 +libprotoc >= 3.11.0 +cmake >= 3.15.4 +ninja >= 1.10.2 +``` + +GCC can be found [here](https://gcc.gnu.org/install/), or if you have [Homebrew](https://docs.brew.sh/Installation), you can use `brew install gcc`. To check what version of gcc you have installed, run `gcc --version`. + +The instructions to install libprotoc can be found [here](https://google.github.io/proto-lens/installing-protoc.html). Or alternatively, if you have Homebrew, you can run `brew install protobuf`. To check what version you have installed, run `protoc --version`. +Custom directions for installing protobuf under Windows are provided [here](BuildOnWindows.md#protobuf). + +Cmake can be found [here](https://cmake.org/download/). However, to use Cmake, you need to follow the "How to Install For Command Line Use" tutorial, which can be found in Cmake under Tools>How to Install For Command Line Use. To check which version you have, you can either look in the desktop version under CMake>About, or run `cmake --version`. + +The instructions for installing Ninja can be found [here](https://ninja-build.org/). Or, using Homebrew, you can run `brew install ninja`. To check the version, run `ninja --version`. diff --git a/docs/Testing.md b/docs/Testing.md index de4a5e00f3a6..7fbd259231cc 100644 --- a/docs/Testing.md +++ b/docs/Testing.md @@ -16,7 +16,7 @@ To invoke the test, use the following command: ``` cmake --build . --config Release --target check-onnx-backend[-jni] ``` -Packages, such as third_party/onnx, needs to be installed to run the backend test. 
JNI test requires the jsoniter jar which is downloaed from maven repository by default if no installed version is found on the system. If the user turns on the cmake option `ONNX_MLIR_BUILD_JSONITER` when building ONNX-MLIR, the jsoniter jar will be built locally from the source cloned from its github repository. Note that building jsoniter jar locally requires maven build tool to be installed. +Packages, such as third_party/onnx, needs to be installed to run the backend test. JNI test requires the jsoniter jar which is downloaded from its maven repository by default if no installed version is found on the system. If the user turns on the cmake option `ONNX_MLIR_BUILD_JSONITER` when building ONNX-MLIR, the jsoniter jar will be built locally from the source cloned from its github repository. Note that building jsoniter jar locally requires the maven build tool to be installed. The node and model tests in onnx that will be run by check-onnx-backend is defined by variable test_to_enable in test/backend/test.py. User can test one test case by environment variable `TEST_CASE_BY_USER`. For example, ``` diff --git a/docs/TestingHighLevel.md b/docs/TestingHighLevel.md new file mode 100644 index 000000000000..7e2f6d483926 --- /dev/null +++ b/docs/TestingHighLevel.md @@ -0,0 +1,39 @@ + + +# ONNX-MLIR: Testing and Specific Environment variables + +## High level testing of ONNX-MLIR + +To run the lit ONNX-MLIR tests, use the following command: + +[same-as-file]: <> ({"ref": "utils/check-onnx-mlir.cmd", "skip-ref": 1}) +```shell +call cmake --build . --config Release --target check-onnx-lit +``` + +To run the numerical ONNX-MLIR tests, use the following command: + +[same-as-file]: <> ({"ref": "utils/check-onnx-numerical.cmd", "skip-ref": 1}) +```shell +call cmake --build . --config Release --target check-onnx-numerical +``` + +To run the doc ONNX-MLIR tests, use the following command after installing third_party ONNX shown below. 
Details to first install the third_party ONNX project are detailed [here](BuildONNX.md). Note that it is key to install the ONNX project's version listed in our third_party subdirectory, as ONNX-MLIR may be behind the latest version from the ONNX standard. + +[same-as-file]: <> ({"ref": "utils/check-docs.cmd", "skip-ref": 1}) +```shell +call cmake --build . --config Release --target check-docs +``` + +## Summary of LLVM and ONNX-MLIR Environment Variables + +The following CMake variables from LLVM and ONNX-MLIR can be used when compiling ONNX-MLIR. + +**MLIR_DIR**:PATH + Path to to the mlir cmake module inside an llvm-project build or install directory (e.g., c:/repos/llvm-project/build/lib/cmake/mlir). + This is required if **MLIR_DIR** is not specified as an environment variable. + +**LLVM_EXTERNAL_LIT**:PATH + Path to the lit tool. Defaults to an empty string and LLVM will find the tool based on **MLIR_DIR** if possible. + This is required when **MLIR_DIR** points to an install directory. + diff --git a/docs/docker-example/Dockerfile b/docs/docker-example/Dockerfile index ac43ae64043c..122458e72674 100644 --- a/docs/docker-example/Dockerfile +++ b/docs/docker-example/Dockerfile @@ -10,19 +10,20 @@ RUN apt-get install -y python3-pip RUN python -m pip install --upgrade pip RUN apt-get install -y gdb RUN apt-get install -y lldb -RUN apt-get install -y emacs + +# 2) Instal optional packages, comment/uncomment/add as you see fit. RUN apt-get install -y vim -# 2) Instal optional packages, uncomment/add as you see fit. -# RUN apt-get install -y valgrind -# RUN apt-get install -y libeigen3-dev -# RUN apt-get install -y clang-format -# RUN python -m pip install wheel -# RUN python -m pip install numpy -# RUN python -m pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -# RUN git clone https://github.com/onnx/tutorials.git -# Install clang-12. 
-# RUN apt-get install -y lsb-release wget software-properties-common -# RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" +RUN apt-get install -y emacs +RUN apt-get install -y valgrind +RUN apt-get install -y libeigen3-dev +RUN apt-get install -y clang-format +RUN python -m pip install wheel +RUN python -m pip install numpy +RUN python -m pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html +RUN git clone https://github.com/onnx/tutorials.git +# Install clang +RUN apt-get install -y lsb-release wget software-properties-common +RUN bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" # 3) When using vscode, copy your .vscode in the Dockerfile dir and # uncomment the two lines below. @@ -36,8 +37,12 @@ RUN apt-get install -y vim # 5) Fix git by reattaching head and making git see other branches than main. WORKDIR /workdir/onnx-mlir +RUN git remote rename origin upstream RUN git checkout main RUN git fetch --unshallow +# Add optional personal fork and disable pushing to upstream (best practice). +# RUN git remote add origin https://github.com/<>/onnx-mlir.git +# RUN git remote set-url --push upstream no_push # 6) Set the PATH environment vars for make/debug mode. Replace Debug # with Release in the PATH below when using Release mode. diff --git a/src/Builder/FrontendDialectTransformer.hpp b/src/Builder/FrontendDialectTransformer.hpp index a8c851896197..cdbbe5527e5a 100644 --- a/src/Builder/FrontendDialectTransformer.hpp +++ b/src/Builder/FrontendDialectTransformer.hpp @@ -30,7 +30,7 @@ class OwningModuleRef; } // namespace mlir //===----------------------------------------------------------------------===// -// Import a model into the ONNX MLIR dialect. +// Import a model into the ONNX-MLIR dialect. 
//===----------------------------------------------------------------------===// namespace onnx_mlir { diff --git a/src/Compiler/CompilerUtils.cpp b/src/Compiler/CompilerUtils.cpp index e3471f830c8c..6fc9f5a33369 100644 --- a/src/Compiler/CompilerUtils.cpp +++ b/src/Compiler/CompilerUtils.cpp @@ -34,7 +34,7 @@ using namespace mlir; using namespace onnx_mlir; llvm::cl::OptionCategory OnnxMlirOptions( - "ONNX MLIR Options", "These are frontend options."); + "ONNX-MLIR Options", "These are frontend options."); namespace { @@ -312,7 +312,7 @@ void setTargetTriple(const std::string &triple) { mtriple = triple; } void LoadMLIR(string inputFilename, mlir::MLIRContext &context, mlir::OwningModuleRef &module) { - // Handle '.mlir' input to the ONNX MLIR frontend. + // Handle '.mlir' input to the ONNX-MLIR frontend. // The mlir format indicates that one or more of the supported // representations are used in the file. string errorMessage; diff --git a/src/Dialect/ONNX/ONNXOps.td b/src/Dialect/ONNX/ONNXOps.td index d992cc8e9aa2..e3ca5e4087cf 100644 --- a/src/Dialect/ONNX/ONNXOps.td +++ b/src/Dialect/ONNX/ONNXOps.td @@ -62,7 +62,7 @@ class ONNX_Op traits = []> : //the tablegen code onnxop.in is generated with gen_doc.py //clone and install onnx // git clone --recursive https://github.com/onnx/onnx.git -// set up env for anaconda3 and for ONNX MLIR (BOOSTROOT, cmake, gcc ...) +// set up env for anaconda3 and for ONNX-MLIR (BOOSTROOT, cmake, gcc ...) // cd onnx //install onnx // CC=gcc CXX=g++ pip install -e . diff --git a/src/Pass/Passes.hpp b/src/Pass/Passes.hpp index f16af3deac9f..0e392e707470 100644 --- a/src/Pass/Passes.hpp +++ b/src/Pass/Passes.hpp @@ -2,13 +2,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -//===---------- Passes.hpp - ONNX MLIR Passes Definition ------------------===// +//===---------- Passes.hpp - ONNX-MLIR Passes Definition ------------------===// // // Copyright 2019-2020 The IBM Research Authors. 
// // ============================================================================= // -// This file exposes the entry points to create compiler passes for ONNX MLIR. +// This file exposes the entry points to create compiler passes for ONNX-MLIR. // //===----------------------------------------------------------------------===// diff --git a/src/Support/OMOptions.cpp b/src/Support/OMOptions.cpp index 7c1144db41cc..f1f0332994e1 100644 --- a/src/Support/OMOptions.cpp +++ b/src/Support/OMOptions.cpp @@ -14,7 +14,7 @@ #include "src/Support/OMOptions.hpp" -llvm::cl::OptionCategory OMPassOptions("ONNX MLIR Pass Options", +llvm::cl::OptionCategory OMPassOptions("ONNX-MLIR Pass Options", "These are options to provide fine control on passes"); llvm::cl::opt instrumentONNXOps("instrument-onnx-ops", diff --git a/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp b/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp index 915eb0cd6bc2..1b6d6ef14a97 100644 --- a/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp +++ b/src/Tool/ONNXMLIROpt/ONNXMLIROpt.cpp @@ -96,7 +96,7 @@ int main(int argc, char **argv) { mlir::registerPassManagerCLOptions(); mlir::PassPipelineCLParser passPipeline("", "Compiler passes to run"); llvm::cl::ParseCommandLineOptions( - argc, argv, "ONNX MLIR modular optimizer driver\n"); + argc, argv, "ONNX-MLIR modular optimizer driver\n"); // Set up the input file. 
std::string error_message; diff --git a/src/main.cpp b/src/main.cpp index 388fc0c85fd5..4c09c255109c 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -37,19 +37,19 @@ int main(int argc, char *argv[]) { clEnumVal( EmitONNXIR, "Ingest ONNX and emit corresponding ONNX dialect."), clEnumVal( - EmitMLIR, "Lower model to MLIR built-in transformation dialect."), - clEnumVal(EmitLLVMIR, "Lower model to LLVM IR (LLVM dialect)."), - clEnumVal(EmitLib, "Lower model to LLVM IR, emit (to file) " - "LLVM bitcode for model, compile and link it to a " - "shared library."), - clEnumVal(EmitJNI, "Lower model to LLVM IR -> LLVM bitcode " + EmitMLIR, "Lower input to MLIR built-in transformation dialect."), + clEnumVal(EmitLLVMIR, "Lower input to LLVM IR (LLVM MLIR dialect)."), + clEnumVal(EmitLib, "Lower input to LLVM IR, emit " + "LLVM bitcode, compile and link it to a " + "shared library (default)."), + clEnumVal(EmitJNI, "Lower input to LLVM IR -> LLVM bitcode " "-> JNI shared library -> jar")), llvm::cl::init(EmitLib), llvm::cl::cat(OnnxMlirOptions)); // llvm::cl::HideUnrelatedOptions(OnnxMlirOptions); mlir::registerPassManagerCLOptions(); llvm::cl::ParseCommandLineOptions( - argc, argv, "ONNX MLIR modular optimizer driver\n"); + argc, argv, "ONNX-MLIR modular optimizer driver\n"); mlir::OwningModuleRef module; std::string errorMessage; diff --git a/test/mlir/CMakeLists.txt b/test/mlir/CMakeLists.txt index c413705c72bb..9c9d42dabec1 100644 --- a/test/mlir/CMakeLists.txt +++ b/test/mlir/CMakeLists.txt @@ -33,7 +33,7 @@ set(ONNX_MLIR_TEST_DEPENDS ) add_lit_testsuite(check-onnx-lit - "Running the ONNX MLIR regression tests" + "Running the ONNX-MLIR regression tests" ${CMAKE_CURRENT_BINARY_DIR} DEPENDS ${ONNX_MLIR_TEST_DEPENDS}) diff --git a/test/numerical/CMakeLists.txt b/test/numerical/CMakeLists.txt index 32c0be65434b..4a99289b4024 100644 --- a/test/numerical/CMakeLists.txt +++ b/test/numerical/CMakeLists.txt @@ -4,7 +4,7 @@ add_custom_target(numerical) 
set_target_properties(numerical PROPERTIES FOLDER "Tests") add_custom_target(check-onnx-numerical - COMMENT "Running the ONNX MLIR numerical regression tests" + COMMENT "Running the ONNX-MLIR numerical regression tests" COMMAND "${CMAKE_CTEST_COMMAND}" -L numerical --output-on-failure -C $ --force-new-ctest-process USES_TERMINAL DEPENDS numerical @@ -47,7 +47,7 @@ function(add_numerical_unittest test_name) endif() endfunction() -# All libraries and executables coming from llvm or ONNX MLIR have had their +# All libraries and executables coming from llvm or ONNX-MLIR have had their # compile flags updated via llvm_update_compile_flags, so we need to do that to # rapidcheck as well, so that we can successfully link against it. Otherwise some # of the flags for exceptions (among others) are not set correctly. diff --git a/utils/RunONNXLib.cpp b/utils/RunONNXLib.cpp index c48060f4d751..cb3ec0e61f0b 100644 --- a/utils/RunONNXLib.cpp +++ b/utils/RunONNXLib.cpp @@ -63,7 +63,7 @@ Usage: run-onnx-lib [options] model.so // Json reader & LLVM suport. #include "llvm/Support/JSON.h" -// Include ONNX MLIR Runtime support. +// Include ONNX-MLIR Runtime support. #include "OnnxMlirRuntime.h" using namespace std; diff --git a/utils/clone-mlir.sh b/utils/clone-mlir.sh index 33b0cc2ee237..288ceef38096 100644 --- a/utils/clone-mlir.sh +++ b/utils/clone-mlir.sh @@ -1,3 +1,3 @@ git clone https://github.com/llvm/llvm-project.git -# Check out a specific branch that is known to work with ONNX MLIR. +# Check out a specific branch that is known to work with ONNX-MLIR. cd llvm-project && git checkout 0bf230d4220660af8b2667506f8905df2f716bdf && cd .. diff --git a/utils/install-protobuf.cmd b/utils/install-protobuf.cmd index 47c8fc41d3bd..0b87010f6031 100644 --- a/utils/install-protobuf.cmd +++ b/utils/install-protobuf.cmd @@ -1,5 +1,5 @@ git clone --recurse-submodules https://github.com/protocolbuffers/protobuf.git -REM Check out a specific branch that is known to work with ONNX MLIR. 
+REM Check out a specific branch that is known to work with ONNX-MLIR. REM This corresponds to the v3.11.4 tag cd protobuf && git checkout d0bfd5221182da1a7cc280f3337b5e41a89539cf && cd ..