Skip to content

Commit

Permalink
Merge branch 'main' into feature/2022-06-16/use-curand
Browse files Browse the repository at this point in the history
  • Loading branch information
junrushao authored Jun 18, 2022
2 parents e3445bc + 4b15746 commit 98071c7
Show file tree
Hide file tree
Showing 61 changed files with 2,939 additions and 325 deletions.
17 changes: 12 additions & 5 deletions apps/microtvm/arduino/template_project/microtvm_api_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,14 +214,21 @@ def _template_model_header(self, source_dir, metadata):
with open(source_dir / "model.h", "r") as f:
model_h_template = Template(f.read())

assert (
metadata["style"] == "full-model"
all_module_names = []
for name in metadata["modules"].keys():
all_module_names.append(name)

assert all(
metadata["modules"][mod_name]["style"] == "full-model" for mod_name in all_module_names
), "when generating AOT, expect only full-model Model Library Format"

template_values = {
"workspace_size_bytes": metadata["memory"]["functions"]["main"][0][
workspace_size_bytes = 0
for mod_name in all_module_names:
workspace_size_bytes += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][
"workspace_size_bytes"
],
]
template_values = {
"workspace_size_bytes": workspace_size_bytes,
}

with open(source_dir / "model.h", "w") as f:
Expand Down
4 changes: 4 additions & 0 deletions docker/Dockerfile.ci_gpu
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
RUN bash /install/ubuntu_install_sccache.sh
ENV PATH /opt/sccache:$PATH

# dnnl
COPY install/ubuntu_install_dnnl.sh /install/ubuntu_install_dnnl.sh
RUN bash /install/ubuntu_install_dnnl.sh

# Environment variables
ENV PATH=/usr/local/nvidia/bin:${PATH}
ENV PATH=/usr/local/cuda/bin:${PATH}
Expand Down
113 changes: 113 additions & 0 deletions docker/clear-stale-images.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Remove tvm-related docker images from the local system which
# are not used by the currently-checked-out branch in this git
# repository plus any linked worktrees.

set -euo pipefail

dry_run=0
repositories=( "$(cd "$(dirname "$0")" && git rev-parse --show-toplevel)" )
skip_confirm=0
verbose=0
while [ "${1+x}" == "x" ]; do
    case "$1" in
        --help|-h)
            echo "usage: $0 [-n] [-v] [-y] <repository> [<repository> ...]"
            echo ""
            echo "Remove tvm-related docker images from the local system which"
            echo "are not used by the currently-checked-out branch in this git"
            echo "repository plus any linked worktrees."
            echo ""
            echo 'This command should remove only docker images beginning with "tlcpack"'
            echo ""
            echo "Options:"
            echo " -n             Perform a dry-run and just print the docker rmi command"
            echo " -v             Verbosely list the images kept and why"
            echo " -y             Skip confirmation"
            echo " <repository>   Additional git repositories to consult."
            exit 2
            ;;
        -n)
            dry_run=1
            ;;
        -v)
            verbose=1
            ;;
        -y)
            skip_confirm=1
            ;;
        *)
            repositories+=( "$1" )
            ;;
    esac
    shift
done

# Collect every ci_* image referenced by the Jenkinsfile of each worktree of
# each repository: these are the images that must be kept.
used_images=( )
for r in "${repositories[@]}"; do
    if [ -d "${r}/.git" ]; then
        worktree="${r}"
    else
        # A linked worktree's .git is a one-line file of the form
        # "gitdir: <path>"; strip the prefix so we can run git there.
        worktree="$(sed -e 's/^gitdir: //' "${r}/.git")"
    fi
    while read -r wt; do
        d="${wt#worktree }"  # strip the "worktree " porcelain prefix
        for img in $(grep -E '^ci_[a-z]+ = ' "${d}/Jenkinsfile" | sed -E "s/ci_[a-z]+ = '([^']*)'/\1/"); do
            used_images+=( "${img}" )
        done
    done < <(cd "${worktree}" && git worktree list --porcelain | grep '^worktree ')
done

# Walk the locally-present images and keep only unused tlcpack ones.
to_rm=( )
while read -r image; do
    if [ "${image}" == "<none>:<none>" ]; then
        continue  # dangling image; never ours to manage
    fi
    case "${image}" in
        tlcpack*) ;;
        *)
            if [ ${verbose} -ne 0 ]; then
                echo "skipping (non-tvm): ${image}"
            fi
            continue
            ;;
    esac
    # Fixed-string match (-F) so dots in tags are not regex wildcards.
    # The ${arr[@]+...} guard keeps an empty array safe under set -u on
    # bash versions before 4.4.
    if printf '%s\n' ${used_images[@]+"${used_images[@]}"} | grep -qF -- "${image}"; then
        if [ ${verbose} -ne 0 ]; then
            echo "skipping (image used): ${image}"
        fi
        continue
    fi
    to_rm+=( "${image}" )
done < <(docker images --format '{{.Repository}}:{{.Tag}}')

# docker rmi with no arguments is an error; bail out cleanly instead.
if [ ${#to_rm[@]} -eq 0 ]; then
    echo "No stale tlcpack images to remove."
    exit 0
fi

docker_cmd=( docker rmi "${to_rm[@]}" )
if [ ${dry_run} -ne 0 ]; then
    echo "would run: ${docker_cmd[*]}"
else
    if [ ${skip_confirm} -eq 0 ]; then
        echo "will run: ${docker_cmd[*]}"
        read -r -p "Proceed? [y/N] " proceed
        if [ "${proceed-}" != "y" ] && [ "${proceed-}" != "Y" ]; then
            echo "Aborted."
            exit 2
        fi
    fi
    "${docker_cmd[@]}"
fi
8 changes: 8 additions & 0 deletions docs/contribute/pull_request.rst
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,14 @@ each time (e.g. you can test a change in CPU and i386 while retaining incrementa
# run the CPU build and drop into a shell in the container
python tests/scripts/ci.py cpu --interactive
We regularly update our Docker images and, over time, stale images may unnecessarily consume disk
space. You can remove stale images that are not used by the currently checked-out branch or any
other worktree with the following command:

.. code:: bash

    docker/clear-stale-images.sh

Consult the ``--help`` output for more options.

C++ (local)
^^^^^^^^^^^
Expand Down
27 changes: 27 additions & 0 deletions include/tvm/meta_schedule/database.h
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,9 @@ struct WorkloadEqual {
}
};

/*! \brief The class of measure candidates. */
class MeasureCandidate;

/*! \brief The class of tuning records. */
class TuningRecordNode : public runtime::Object {
public:
Expand All @@ -123,6 +126,9 @@ class TuningRecordNode : public runtime::Object {
static constexpr const char* _type_key = "meta_schedule.TuningRecord";
TVM_DECLARE_FINAL_OBJECT_INFO(TuningRecordNode, runtime::Object);

/*! \brief Construct the measure candidate given the initial IR module and trace
* stored in the tuning record. */
MeasureCandidate AsMeasureCandidate() const;
/*!
* \brief Export the tuning record to a JSON string.
* \return An array containing the trace, running secs, serialized target, and
Expand Down Expand Up @@ -187,6 +193,11 @@ class DatabaseNode : public runtime::Object {
* \return An array of top K tuning records for the given workload.
*/
virtual Array<TuningRecord> GetTopK(const Workload& workload, int top_k) = 0;
/*!
* \brief Get all tuning records from the database.
* \return An Array of all the tuning records in the database.
*/
virtual Array<TuningRecord> GetAllTuningRecords() = 0;
/*!
* \brief Get the size of the database.
* \return The size of the database.
Expand Down Expand Up @@ -224,6 +235,11 @@ class PyDatabaseNode : public DatabaseNode {
* \return An array of top K tuning records for the given workload.
*/
using FGetTopK = runtime::TypedPackedFunc<Array<TuningRecord>(const Workload&, int)>;
/*!
* \brief The function type of `GetAllTuningRecords` method.
* \return An Array of all the tuning records in the database.
*/
using FGetAllTuningRecords = runtime::TypedPackedFunc<Array<TuningRecord>()>;
/*!
* \brief The function type of `Size` method.
* \return The size of the database.
Expand All @@ -238,6 +254,8 @@ class PyDatabaseNode : public DatabaseNode {
FCommitTuningRecord f_commit_tuning_record;
/*! \brief The packed function to the `GetTopK` function. */
FGetTopK f_get_top_k;
/*! \brief The packed function to the `GetAllTuningRecords` function. */
FGetAllTuningRecords f_get_all_tuning_records;
/*! \brief The packed function to the `Size` function. */
FSize f_size;

Expand All @@ -249,6 +267,7 @@ class PyDatabaseNode : public DatabaseNode {
// `f_commit_workload` is not visited
// `f_commit_tuning_record` is not visited
// `f_get_top_k` is not visited
// `f_get_all_tuning_records` is not visited
// `f_size` is not visited
}

Expand All @@ -273,6 +292,12 @@ class PyDatabaseNode : public DatabaseNode {
return f_get_top_k(workload, top_k);
}

Array<TuningRecord> GetAllTuningRecords() final {
ICHECK(f_get_all_tuning_records != nullptr)
<< "PyDatabase's GetAllTuningRecords method not implemented!";
return f_get_all_tuning_records();
}

int64_t Size() final {
ICHECK(f_size != nullptr) << "PyDatabase's Size method not implemented!";
return f_size();
Expand Down Expand Up @@ -302,13 +327,15 @@ class Database : public runtime::ObjectRef {
* \param f_commit_workload The packed function of `CommitWorkload`.
* \param f_commit_tuning_record The packed function of `CommitTuningRecord`.
* \param f_get_top_k The packed function of `GetTopK`.
* \param f_get_all_tuning_records The packed function of `GetAllTuningRecords`.
* \param f_size The packed function of `Size`.
* \return The created database.
*/
TVM_DLL static Database PyDatabase(PyDatabaseNode::FHasWorkload f_has_workload,
PyDatabaseNode::FCommitWorkload f_commit_workload,
PyDatabaseNode::FCommitTuningRecord f_commit_tuning_record,
PyDatabaseNode::FGetTopK f_get_top_k,
PyDatabaseNode::FGetAllTuningRecords f_get_all_tuning_records,
PyDatabaseNode::FSize f_size);
TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Database, runtime::ObjectRef, DatabaseNode);
};
Expand Down
2 changes: 2 additions & 0 deletions python/tvm/auto_scheduler/testing/tune_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.utils import cpu_count
from tvm.relay.frontend import from_onnx
from tvm.support import describe


def _parse_args():
Expand Down Expand Up @@ -148,6 +149,7 @@ def main():
else:
raise NotImplementedError(f"Unsupported target {ARGS.target}")

describe()
print(f"Workload: {ARGS.model_name}")
onnx_model = onnx.load(ARGS.onnx_path)
shape_dict = {}
Expand Down
4 changes: 3 additions & 1 deletion python/tvm/auto_scheduler/testing/tune_relay.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,14 +146,16 @@ def main():
)
else:
raise NotImplementedError(f"Unsupported target {ARGS.target}")

describe()
print(f"Workload: {ARGS.workload}")
mod, params, (input_name, input_shape, input_dtype) = get_network(
ARGS.workload,
ARGS.input_shape,
cache_dir=ARGS.cache_dir,
)
input_info = {input_name: input_shape}
input_data = {}
print(f"Workload: {ARGS.workload}")
for input_name, input_shape in input_info.items():
print(f" input_name: {input_name}")
print(f" input_shape: {input_shape}")
Expand Down
2 changes: 2 additions & 0 deletions python/tvm/auto_scheduler/testing/tune_te.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,8 @@ def _parse_args():


def main():
describe()
print(f"Workload: {ARGS.workload}")
log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
workload_func, params = CONFIGS[ARGS.workload]
params = params[0] # type: ignore
Expand Down
11 changes: 11 additions & 0 deletions python/tvm/contrib/pipeline_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ def __init__(self, module):
self._get_input = self.module["get_input"]
self._get_output = self.module["get_output"]
self._get_num_outputs = self.module["get_num_outputs"]
self._get_num_inputs = self.module["get_num_inputs"]
self._get_input_pipeline_map = self.module["get_input_pipeline_map"]
self._get_pipe_execute_count = self.module["get_execute_count"]

Expand Down Expand Up @@ -159,6 +160,16 @@ def num_outputs(self):
"""
return self._get_num_outputs()

@property
def num_inputs(self):
"""Get the number of inputs
Returns
-------
count : int
The number of inputs
"""
return self._get_num_inputs()

@staticmethod
def load_library(config_file_name):
"""Import files to create a pipeline executor.
Expand Down
17 changes: 14 additions & 3 deletions python/tvm/driver/tvmc/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -391,9 +391,20 @@ def import_package(self, package_path: str):
with open(temp.relpath("metadata.json")) as metadata_json:
metadata = json.load(metadata_json)

has_graph_executor = "graph" in metadata["executors"]
graph = temp.relpath("executor-config/graph/graph.json") if has_graph_executor else None
params = temp.relpath(f'parameters/{metadata["model_name"]}.params')
all_module_names = []
for name in metadata["modules"].keys():
all_module_names.append(name)
assert len(all_module_names) == 1, "Multiple modules in MLF is not supported."

module_name = all_module_names[0]
module_metdata = metadata["modules"][module_name]
has_graph_executor = "graph" in module_metdata["executors"]
graph = (
temp.relpath(f"executor-config/graph/{module_name}.graph")
if has_graph_executor
else None
)
params = temp.relpath(f"parameters/{module_name}.params")

self.type = "mlf"
else:
Expand Down
Loading

0 comments on commit 98071c7

Please sign in to comment.