forked from neo-ai/tvm
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[BYOC][Contrib] Arm Compute Library integration (apache#5915)
* [BYOC][Contrib] Arm Compute Library integration Arm Compute Library (ACL) integration using the BYOC infrastructure. This will enable offloading select operators from a relay graph to ACL so we can achieve faster inference times on Arm CPU's due to hand crafted optimized routines. The PR adds initial support for offloading FP32 conv2d, maxpool2d and reshape to ACL. ACL codegen is used to generate a JSON representation of an operator or 'ACL layer', the ACL runtime then uses this representation to construct a layer, cache it and create a packed function to for the graph runtime to call into. RFC here: https://discuss.tvm.ai/t/rfc-byoc-arm-compute-library-integration/7082 Change-Id: If756dcea787ea346b1508e9a191b7eed7bd02b7f * Refactor ACL integration to support JSON runtime * Now uses JSON runtime * Addresses tutorial comments * Rename acl to arm_compute_lib in user facing api Change-Id: I3b5ef80607f713e898363e82ab4398fbc2cf267a * Address comments Change-Id: I041fda14f3bf9975f3518ba8a4e3ab43ba98403d * Address comments * correct mistakes in tutorial * reshuffle runtime to use fewer macro blocks * preprocess module using "optimize" functionality * use new module api Change-Id: I219488e617e5767edd7489b43b8bfce876cd24b8 * Enable ACL codegen tests in CI * Skips runtime tests as these are not supported on x86. 
Change-Id: I6843c003a2604afe95cfdccf2323d2a336b56fe5 * Fix check for runtime Change-Id: I3f9eec15c599f01b1105d624fb053b73bfb6ed41 * Address comments * Add warning to ACL engine creation * Correct runtime check so it doesn't fail when codegen not present * Improve testing to check acl partitions is what is expected * Check results of multiple runs test Change-Id: I9522950930805b9b601dad03269adcf8ed3138cc * Address comments * Multiple style improvements * Use base class for creating json node for single op * Move GetSource to base class * Improve annotation checks Change-Id: I8219659c4b99e86df887cd914720157cb94c61a0 * Improve tutorial Change-Id: I8f610bd37af1e3740fd48c2d502bcc4727d9d712 * Initialize conv with nullptr Change-Id: I6c37f0d75a064001c74e171ff83b9f7a7c3f1918
- Loading branch information
Showing
27 changed files
with
2,188 additions
and
26 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,66 @@ | ||
# Licensed to the Apache Software Foundation (ASF) under one | ||
# or more contributor license agreements. See the NOTICE file | ||
# distributed with this work for additional information | ||
# regarding copyright ownership. The ASF licenses this file | ||
# to you under the Apache License, Version 2.0 (the | ||
# "License"); you may not use this file except in compliance | ||
# with the License. You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, | ||
# software distributed under the License is distributed on an | ||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
# KIND, either express or implied. See the License for the | ||
# specific language governing permissions and limitations | ||
# under the License. | ||
|
||
# The codegen and the runtime are built separately because ACL itself can only
# be built for AArch. With the common cross-compilation workflow for Arm
# devices, a relay graph is compiled on x86 for AArch and then executed on the
# AArch target, so the codegen (and the runtime module stub it needs) must be
# buildable on any host.
if(USE_ARM_COMPUTE_LIB)
  file(GLOB ACL_RELAY_CONTRIB_SRC src/relay/backend/contrib/arm_compute_lib/*.cc)
  file(GLOB ACL_RUNTIME_MODULE src/runtime/contrib/arm_compute_lib/acl_runtime.cc)
  list(APPEND COMPILER_SRCS ${ACL_RELAY_CONTRIB_SRC} ${ACL_RUNTIME_MODULE})
  message(STATUS "Build with Arm Compute Library support...")
endif()
|
||
if(USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME)
  # The option is either "ON" (use the in-tree 'acl' checkout) or a custom
  # path to an ACL installation.
  if(USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME STREQUAL "ON")
    set(ACL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/acl)
  else()
    set(ACL_PATH ${USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME})
  endif()

  file(GLOB ACL_CONTRIB_SRC src/runtime/contrib/arm_compute_lib/*)

  set(ACL_INCLUDE_DIRS ${ACL_PATH}/include ${ACL_PATH})
  include_directories(${ACL_INCLUDE_DIRS})

  # Locate the prebuilt ACL libraries; both plain and 'lib'-prefixed names are
  # tried so scons- and cmake-style builds are both handled.
  find_library(EXTERN_ACL_COMPUTE_LIB
               NAMES arm_compute libarm_compute
               HINTS "${ACL_PATH}" "${ACL_PATH}/lib" "${ACL_PATH}/build")
  find_library(EXTERN_ACL_COMPUTE_CORE_LIB
               NAMES arm_compute_core libarm_compute_core
               HINTS "${ACL_PATH}" "${ACL_PATH}/lib" "${ACL_PATH}/build")
  find_library(EXTERN_ACL_COMPUTE_GRAPH_LIB
               NAMES arm_compute_graph libarm_compute_graph
               HINTS "${ACL_PATH}" "${ACL_PATH}/lib" "${ACL_PATH}/build")

  list(APPEND TVM_RUNTIME_LINKER_LIBS
       ${EXTERN_ACL_COMPUTE_LIB}
       ${EXTERN_ACL_COMPUTE_CORE_LIB}
       ${EXTERN_ACL_COMPUTE_GRAPH_LIB})
  list(APPEND RUNTIME_SRCS ${ACL_CONTRIB_SRC})
  message(STATUS "Build with Arm Compute Library graph runtime support: "
                 ${EXTERN_ACL_COMPUTE_LIB} ", \n"
                 ${EXTERN_ACL_COMPUTE_CORE_LIB} ", \n"
                 ${EXTERN_ACL_COMPUTE_GRAPH_LIB})

  # Compile-time flag so the runtime can detect ACL graph runtime support.
  add_definitions(-DTVM_GRAPH_RUNTIME_ARM_COMPUTE_LIB)
endif()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,139 @@ | ||
.. Licensed to the Apache Software Foundation (ASF) under one | ||
or more contributor license agreements. See the NOTICE file | ||
distributed with this work for additional information | ||
regarding copyright ownership. The ASF licenses this file | ||
to you under the Apache License, Version 2.0 (the | ||
"License"); you may not use this file except in compliance | ||
with the License. You may obtain a copy of the License at | ||
.. http://www.apache.org/licenses/LICENSE-2.0 | ||
.. Unless required by applicable law or agreed to in writing, | ||
software distributed under the License is distributed on an | ||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
KIND, either express or implied. See the License for the | ||
specific language governing permissions and limitations | ||
under the License. | ||
Relay Arm|reg| Compute Library Integration | ||
========================================== | ||
|
||
Introduction | ||
------------ | ||
|
||
Arm Compute Library (ACL) is an open source project that provides accelerated kernels for Arm CPUs
and GPUs. Currently the integration offloads operators to ACL to use hand-crafted assembler
routines in the library. By offloading select operators from a relay graph to ACL we can achieve | ||
a performance boost on such devices. | ||
|
||
Building with ACL support | ||
------------------------- | ||
|
||
The current implementation has two separate build options in cmake. The reason for this split is | ||
because ACL cannot be used on an x86 machine. However, we still want to be able to compile an ACL
runtime module on an x86 machine. | ||
|
||
* USE_ARM_COMPUTE_LIB=ON/OFF - Enabling this flag will add support for compiling an ACL runtime module. | ||
* USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME=ON/OFF/path-to-acl - Enabling this flag will allow the graph runtime to | ||
compute the ACL offloaded functions. | ||
|
||
These flags can be used in different scenarios depending on your setup. For example, if you want | ||
to compile an ACL module on an x86 machine and then run the module on a remote Arm device via RPC, you will | ||
need to use USE_ARM_COMPUTE_LIB=ON on the x86 machine and USE_ARM_COMPUTE_LIB_GRAPH_RUNTIME=ON on the remote | ||
AArch64 device. | ||
|
||
Usage | ||
----- | ||
|
||
.. note:: | ||
|
||
This section may not stay up-to-date with changes to the API. | ||
|
||
Create a relay graph. This may be a single operator or a whole graph. The intention is that any | ||
relay graph can be input. The ACL integration will only pick supported operators to be offloaded | ||
whilst the rest will be computed via TVM. (For this example we will use a single | ||
max_pool2d operator). | ||
|
||
.. code:: python | ||
import tvm | ||
from tvm import relay | ||
data_type = "float32" | ||
data_shape = (1, 14, 14, 512) | ||
strides = (2, 2) | ||
padding = (0, 0, 0, 0) | ||
pool_size = (2, 2) | ||
layout = "NHWC" | ||
output_shape = (1, 7, 7, 512) | ||
data = relay.var('data', shape=data_shape, dtype=data_type) | ||
out = relay.nn.max_pool2d(data, pool_size=pool_size, strides=strides, layout=layout, padding=padding) | ||
module = tvm.IRModule.from_expr(out) | ||
Annotate and partition the graph for ACL. | ||
|
||
.. code:: python
|
||
from tvm.relay.op.contrib.arm_compute_lib import partition_for_arm_compute_lib | ||
module = partition_for_arm_compute_lib(module) | ||
|
||
|
||
Build the Relay graph. | ||
|
||
.. code:: python | ||
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon" | ||
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): | ||
lib = relay.build(module, target=target) | ||
Export the module. | ||
|
||
.. code:: python | ||
lib_path = '~/lib_acl.so' | ||
cross_compile = 'aarch64-linux-gnu-c++' | ||
lib.export_library(lib_path, cc=cross_compile) | ||
Run Inference. This must be on an Arm device. If compiling on x86 device and running on AArch64, | ||
consider using the RPC mechanism. Tutorials for using the RPC mechanism: | ||
https://tvm.apache.org/docs/tutorials/cross_compilation_and_rpc.html#sphx-glr-tutorials-cross-compilation-and-rpc-py | ||
|
||
.. code:: python | ||
ctx = tvm.cpu(0) | ||
loaded_lib = tvm.runtime.load_module('lib_acl.so') | ||
gen_module = tvm.contrib.graph_runtime.GraphModule(loaded_lib['default'](ctx)) | ||
d_data = np.random.uniform(0, 1, data_shape).astype(data_type) | ||
map_inputs = {'data': d_data} | ||
gen_module.set_input(**map_inputs) | ||
gen_module.run() | ||
More examples | ||
------------- | ||
The example above only shows a basic example of how ACL can be used for offloading a single | ||
Maxpool2D. If you would like to see more examples for each implemented operator and for | ||
networks refer to the tests: `tests/python/contrib/test_arm_compute_lib`. Here you can modify | ||
`infrastructure.py` to use the remote device you have setup. | ||
|
||
|
||
Adding a new operator | ||
--------------------- | ||
Adding a new operator requires changes to a series of places. This section will give a hint on | ||
what needs to be changed and where, it will not however dive into the complexities for an | ||
individual operator. This is left to the developer. | ||
|
||
There are a series of files we need to make changes to: | ||
* `python/relay/op/contrib/arm_compute_lib.py` In this file we define the operators we wish to offload using the | ||
`op.register` decorator. This will mean the annotation pass recognizes this operator as ACL | ||
offloadable. | ||
* `src/relay/backend/contrib/arm_compute_lib/codegen.cc` Implement `Create[OpName]JSONNode` method. This is where we | ||
declare how the operator should be represented by JSON. This will be used to create the ACL module. | ||
* `src/runtime/contrib/arm_compute_lib/acl_kernel.h` Implement `Create[OpName]Layer` method. This is where we | ||
define how the JSON representation can be used to create an ACL function. We simply define how to | ||
translate from the JSON representation to ACL API. | ||
* `tests/python/contrib/test_arm_compute_lib` Add unit tests for the given operator. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
# Licensed to the Apache Software Foundation (ASF) under one | ||
# or more contributor license agreements. See the NOTICE file | ||
# distributed with this work for additional information | ||
# regarding copyright ownership. The ASF licenses this file | ||
# to you under the Apache License, Version 2.0 (the | ||
# "License"); you may not use this file except in compliance | ||
# with the License. You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, | ||
# software distributed under the License is distributed on an | ||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY | ||
# KIND, either express or implied. See the License for the | ||
# specific language governing permissions and limitations | ||
# under the License. | ||
# pylint: disable=invalid-name, unused-argument | ||
"""Arm Compute Library supported operators.""" | ||
import tvm | ||
from tvm.relay import transform | ||
from tvm.relay.build_module import bind_params_by_name | ||
|
||
from ...dataflow_pattern import wildcard, is_op, is_constant | ||
from .register import register_pattern_table | ||
|
||
|
||
def is_arm_compute_runtime_enabled():
    """Check if the ACL graph runtime is present.

    Returns
    -------
    ret: bool
        True if present, False if not.
    """
    # The global function is only registered when TVM was built with the ACL
    # graph runtime; allow_missing=True returns None instead of raising.
    check_enabled = tvm.get_global_func("relay.op.is_arm_compute_runtime_enabled", True)
    if not check_enabled:
        return False
    return check_enabled()
|
||
|
||
def partition_for_arm_compute_lib(mod, params=None):
    """Partition the graph greedily offloading supported
    operators to Arm Compute Library.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.

    Returns
    -------
    ret : annotated and partitioned module.
    """
    # Bind constants first so pattern matching can see them as is_constant().
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)

    acl_passes = [
        transform.MergeComposite(arm_compute_lib_pattern_table()),
        transform.AnnotateTarget("arm_compute_lib"),
        transform.PartitionGraph(),
    ]
    return tvm.transform.Sequential(acl_passes)(mod)
|
||
|
||
@register_pattern_table("arm_compute_lib")
def arm_compute_lib_pattern_table():
    """Get the ACL pattern table."""

    def conv_pattern():
        """Create a convolution pattern.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the convolution pattern.
        """
        # conv2d with an optional preceding pad, and optional bias_add / relu.
        conv = is_op("nn.conv2d")(is_op("nn.pad")(wildcard()) | wildcard(), is_constant())
        conv = conv.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        return conv.optional(is_op("nn.relu"))

    def check_conv(extract):
        """Check conv pattern is supported by ACL."""
        node = extract
        # Walk down through the optional relu/bias_add calls to the conv2d.
        while node.op.name != "nn.conv2d":
            node = node.args[0]
        return conv2d(node.attrs, node.args)

    return [("arm_compute_lib.conv2d", conv_pattern(), check_conv)]
|
||
|
||
def _register_external_op_helper(op_name, supported=True):
    """Register an operator as offloadable to ACL with a fixed answer.

    Parameters
    ----------
    op_name : str
        Name of the relay operator to register.
    supported : bool
        The value the annotation check should always report.

    Returns
    -------
    _func_wrapper : callable
        The registered annotation function.
    """

    @tvm.ir.register_op_attr(op_name, "target.arm_compute_lib")
    def _func_wrapper(attrs, args):
        # Unconditionally report the configured support status.
        return supported

    return _func_wrapper


_register_external_op_helper("reshape")
|
||
|
||
@tvm.ir.register_op_attr("nn.conv2d", "target.arm_compute_lib")
def conv2d(attrs, args):
    """Check if the external ACL codegen for conv2d should be used."""
    # Grouped convolutions are not offloaded.
    if attrs.groups != 1:
        return False
    if attrs.data_layout != "NHWC":
        return False
    if attrs.out_dtype not in ("float32", ""):
        return False
    data = args[0].checked_type
    # Only rank-4, batch-1, float32 inputs are supported.
    if len(data.shape) != 4 or data.shape[0] != 1 or data.dtype != "float32":
        return False
    kernel = args[1].checked_type
    return kernel.dtype == "float32"
|
||
|
||
@tvm.ir.register_op_attr("nn.max_pool2d", "target.arm_compute_lib")
def max_pool2d(attrs, args):
    """Check if the external ACL codegen for maxpool2d should be used."""
    # Only NHWC float32 pooling is offloaded to ACL.
    if attrs.layout != "NHWC":
        return False
    return args[0].checked_type.dtype == "float32"
Oops, something went wrong.