Commit 84359a9
Fixed minor misspelling (#7499)
Co-authored-by: mshr-h <mshr-h@users.noreply.github.com>
mshr-h authored Feb 22, 2021
1 parent d666b41 commit 84359a9
Showing 28 changed files with 33 additions and 33 deletions.
2 changes: 1 addition & 1 deletion include/tvm/ir/attrs.h
@@ -146,7 +146,7 @@ class BaseAttrsNode : public Object {
virtual void VisitAttrs(AttrVisitor* v) {}
/*!
* \brief Initialize the attributes by sequence of arguments
- * \param args The postional arguments in the form
+ * \param args The positional arguments in the form
* [key0, value0, key1, value1, ..., key_n, value_n]
*/
template <typename... Args>
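For context, the hunk above documents the flat [key0, value0, key1, value1, ...] calling convention for initializing attributes. A minimal Python sketch of that convention (hypothetical init_by_seq helper, not TVM's C++ implementation):

```python
def init_by_seq(*args):
    """Build an attribute dict from a flat [key0, value0, key1, value1, ...] sequence."""
    if len(args) % 2 != 0:
        raise ValueError("expected an even number of arguments (key/value pairs)")
    # Pair each even-indexed key with the value that follows it.
    return dict(zip(args[0::2], args[1::2]))

attrs = init_by_seq("axis", 1, "keepdims", True)
assert attrs == {"axis": 1, "keepdims": True}
```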
2 changes: 1 addition & 1 deletion include/tvm/runtime/packed_func.h
@@ -1204,7 +1204,7 @@ struct func_signature_helper<R (T::*)(Args...) const> {

/*!
* \brief template class to get function signature of a function or functor.
- * \tparam T The funtion/functor type.
+ * \tparam T The function/functor type.
*/
template <typename T>
struct function_signature {
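The template trait above recovers a callable's signature at compile time. As a rough runtime analogue only (Python has no compile-time traits, and this is not the TVM mechanism), inspect.signature exposes the same information for a function or a functor:

```python
import inspect

def add(x: int, y: int) -> int:
    return x + y

class Adder:
    # A functor in the C++ sense: an object that is callable.
    def __call__(self, x: int, y: int) -> int:
        return x + y

# Both the free function and the functor expose their parameter lists.
print(inspect.signature(add))      # (x: int, y: int) -> int
print(inspect.signature(Adder()))  # (x: int, y: int) -> int
```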
2 changes: 1 addition & 1 deletion include/tvm/topi/einsum.h
@@ -439,7 +439,7 @@ inline std::vector<std::string> Split(const std::string& str, const std::string&
* \param subscripts input subscripts.
* \param operands operand tensors.
*
- * \return vector of strings, vector[0] represents the input part, vector[1] represents the ouput.
+ * \return vector of strings, vector[0] represents the input part, vector[1] represents the output.
* if no output, the vector[1] is NULL.
* "ab, bc -> ac" => ["ab,bc", "ac"]
*/
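The \return documentation above splits an einsum spec into an input part and an output part, e.g. "ab, bc -> ac" => ["ab,bc", "ac"]. A small Python sketch of that convention (illustrative only, not the TVM helper itself):

```python
def split_subscripts(subscripts):
    """Split an einsum spec into [inputs, output]; output is None when absent."""
    spec = subscripts.replace(" ", "")  # "ab, bc -> ac" == "ab,bc->ac"
    if "->" in spec:
        inputs, output = spec.split("->")
        return [inputs, output]
    return [spec, None]

assert split_subscripts("ab, bc -> ac") == ["ab,bc", "ac"]
assert split_subscripts("ab,bc") == ["ab,bc", None]
```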
4 changes: 2 additions & 2 deletions nnvm/src/core/symbolic.cc
@@ -240,7 +240,7 @@ std::vector<std::string> Symbol::ListInputNames(ListInputOption option) const {
}

std::vector<std::string> Symbol::ListOutputNames() const {
- static auto& flist_ouputs = Op::GetAttr<FListOutputNames>("FListOutputNames");
+ static auto& flist_outputs = Op::GetAttr<FListOutputNames>("FListOutputNames");

std::vector<std::string> ret;
ret.reserve(outputs.size());
@@ -250,7 +250,7 @@ std::vector<std::string> Symbol::ListOutputNames() const {
} else {
const std::string& hname = head.node->attrs.name;
std::string rname;
- FListOutputNames fn = flist_ouputs.get(head.node->op(), nullptr);
+ FListOutputNames fn = flist_outputs.get(head.node->op(), nullptr);
if (fn != nullptr) {
rname = fn(head.node->attrs)[head.index];
} else {
2 changes: 1 addition & 1 deletion python/tvm/micro/contrib/zephyr.py
@@ -537,7 +537,7 @@ class QemuStartupFailureError(Exception):


class QemuFdTransport(file_descriptor.FdTransport):
"""An FdTransport subclass that escapes written data to accomodate the QEMU monitor.
"""An FdTransport subclass that escapes written data to accommodate the QEMU monitor.
It's supposedly possible to disable the monitor, but Zephyr controls most of the command-line
arguments for QEMU and there are too many options which implictly enable the monitor, so this
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/coreml.py
@@ -524,7 +524,7 @@ def coreml_op_to_relay(op, inname, outnames, etab):
outname = outnames if isinstance(outnames, _base.string_types) else outnames[0]
etab.set_expr(outname, outs, force_override=True)
else:
- # the number of ouputs from model op and tvm relay must be same
+ # the number of outputs from model op and tvm relay must be same
assert len(outnames) == len(outs)
for outname, out in zip(outnames, outs):
etab.set_expr(outname, out, force_override=True)
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/resnet.py
@@ -177,7 +177,7 @@ def resnet(
Channel size of each stage
num_classes : int
- Ouput size of symbol
+ Output size of symbol
data_shape : tuple of int.
The shape of input data.
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/resnet_3d.py
@@ -174,7 +174,7 @@ def resnet(
Channel size of each stage
num_classes : int
- Ouput size of symbol
+ Output size of symbol
data_shape : tuple of int.
The shape of input data.
2 changes: 1 addition & 1 deletion python/tvm/relay/transform/transform.py
@@ -985,7 +985,7 @@ def transform(func, mod, ctx):
"""

if opt_level is None:
raise ValueError("Please provide opt_level for the funtion pass.")
raise ValueError("Please provide opt_level for the function pass.")

required = required if required else []
if not isinstance(required, (list, tuple)):
2 changes: 1 addition & 1 deletion python/tvm/tir/stmt.py
@@ -109,7 +109,7 @@ class For(Stmt):
The loop variable.
min_val : PrimExpr
- The begining value.
+ The beginning value.
extent : PrimExpr
The length of the loop.
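The corrected docstring above makes the loop bounds explicit: min_val is the beginning value and extent is the length of the loop, so iteration covers [min_val, min_val + extent). In plain Python terms (a sketch of the semantics, not the TIR node):

```python
def run_for(min_val, extent, body):
    # TIR's For runs `extent` times starting at `min_val`;
    # `extent` is a trip count, not an upper bound.
    for i in range(min_val, min_val + extent):
        body(i)

visited = []
run_for(2, 3, visited.append)
assert visited == [2, 3, 4]
```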
2 changes: 1 addition & 1 deletion python/tvm/tir/transform/function_pass.py
@@ -130,7 +130,7 @@ def transform(func, mod, ctx):
"""

if opt_level is None:
- raise ValueError("Please provide opt_level for the funtion pass.")
+ raise ValueError("Please provide opt_level for the function pass.")

required = required if required else []
if not isinstance(required, (list, tuple)):
2 changes: 1 addition & 1 deletion python/tvm/topi/arm_cpu/depthwise_conv2d.py
@@ -692,7 +692,7 @@ def _schedule_spatial_pack(cfg, s, data_vec, kernel_vec, conv, output, last):
if kernel_vec.op.name == "kernel_vec":
co, _, _, _, _ = s[kernel_vec].op.axis
if autotvm.GLOBAL_SCOPE.in_tuning:
- # kernel packing will be pre-computed during compliation, so we skip
+ # kernel packing will be pre-computed during compilation, so we skip
# this part to make tuning records correct
s[kernel_vec].pragma(co, "debug_skip_region")
else:
2 changes: 1 addition & 1 deletion python/tvm/topi/cuda/batch_matmul_tensorcore.py
@@ -291,7 +291,7 @@ def batch_matmul_tensorcore_cuda(x, y):
x_shape = get_const_tuple(x.shape)
y_shape = get_const_tuple(y.shape)
assert x_shape[0] == y_shape[0], "batch dimension doesn't match"
- assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistant"
+ assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
batch, M, K = x.shape
N = y.shape[1]
out_dtype = x.dtype
2 changes: 1 addition & 1 deletion python/tvm/topi/cuda/rcnn/proposal.py
@@ -203,7 +203,7 @@ def argsort_ir(data_buf, out_index_buf):


def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
"""Non-maximum supression.
"""Non-maximum suppression.
Parameters
----------
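nms_ir above builds IR for non-maximum suppression over score-sorted boxes. For background, a minimal NumPy sketch of the greedy NMS loop it corresponds to (illustrative; the real code emits TIR, and the box layout may differ):

```python
import numpy as np

def nms(sorted_bbox, nms_threshold):
    """Greedy NMS over boxes already sorted by descending score.

    sorted_bbox: (N, 4) array of [x1, y1, x2, y2]; returns kept indices.
    """
    keep = []
    suppressed = np.zeros(len(sorted_bbox), dtype=bool)
    area = (sorted_bbox[:, 2] - sorted_bbox[:, 0]) * (sorted_bbox[:, 3] - sorted_bbox[:, 1])
    for i in range(len(sorted_bbox)):
        if suppressed[i]:
            continue
        keep.append(i)
        # Intersection of box i with every lower-scored box.
        x1 = np.maximum(sorted_bbox[i, 0], sorted_bbox[i + 1:, 0])
        y1 = np.maximum(sorted_bbox[i, 1], sorted_bbox[i + 1:, 1])
        x2 = np.minimum(sorted_bbox[i, 2], sorted_bbox[i + 1:, 2])
        y2 = np.minimum(sorted_bbox[i, 3], sorted_bbox[i + 1:, 3])
        inter = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
        iou = inter / (area[i] + area[i + 1:] - inter)
        # Suppress boxes overlapping box i beyond the threshold.
        suppressed[i + 1:] |= iou > nms_threshold
    return keep
```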
2 changes: 1 addition & 1 deletion python/tvm/topi/nn/batch_matmul.py
@@ -62,7 +62,7 @@ def batch_matmul(x, y, oshape=None, auto_scheduler_rewritten_layout=""):
k = te.reduce_axis((0, K), name="k")
if oshape is None:
assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
- assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistant"
+ assert x_shape[2] == y_shape[2], "shapes of x and y is inconsistent"
batch = te.max(XB, YB)
N = y.shape[1]
oshape = (batch, M, N)
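The assertion fixed above guards batch_matmul's shape contract: the reduction axis K must match, and the batch axis must match or be broadcastable from 1. A NumPy sketch of those semantics (y arrives pre-transposed as [batch, N, K]):

```python
import numpy as np

def batch_matmul_ref(x, y):
    """Reference semantics for batch_matmul: x is [XB, M, K], y is [YB, N, K]."""
    XB, M, XK = x.shape
    YB, N, YK = y.shape
    assert XB == YB or XB == 1 or YB == 1, "batch dimension doesn't match"
    assert XK == YK, "shapes of x and y are inconsistent"
    # Output is [max(XB, YB), M, N]; broadcasting handles a size-1 batch.
    return np.matmul(x, y.transpose(0, 2, 1))

out = batch_matmul_ref(np.ones((1, 2, 4)), np.ones((3, 5, 4)))
assert out.shape == (3, 2, 5)
```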
2 changes: 1 addition & 1 deletion python/tvm/topi/random/kernel.py
@@ -121,7 +121,7 @@ def _threefry(
Threefry will write to :code:`out_buf[out_offset:out_offset+4*product(out_shape)]`
out_shape: number
- Determines the number of ouput states to generate. :code:`state[i]` will correspond to
+ Determines the number of output states to generate. :code:`state[i]` will correspond to
counter+i.
"""
nrounds = 20
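The docstring above says state[i] will correspond to counter + i, which is the defining property of a counter-based generator. A toy sketch of that property (the mixing function below is a placeholder, not Threefry's 20 rounds of add/rotate/xor):

```python
def toy_counter_rng(key, counter, n):
    """Toy counter-based PRNG: output i is a pure function of (key, counter + i)."""
    def mix(k, c):
        # Placeholder integer hash standing in for the real Threefry rounds.
        return (k * 6364136223846793005 + c) % (1 << 64)
    return [mix(key, counter + i) for i in range(n)]

states = toy_counter_rng(key=42, counter=100, n=4)
# state[i] corresponds to counter + i, exactly as the docstring describes.
assert states[1] == toy_counter_rng(42, 101, 1)[0]
```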
4 changes: 2 additions & 2 deletions python/tvm/topi/testing/strided_slice_python.py
@@ -26,7 +26,7 @@ def strided_slice_python(data, begin, end, strides, slice_mode="end"):
Input data
begin : list
- Begining of the slices.
+ Beginning of the slices.
end : list
End of the slices.
@@ -81,7 +81,7 @@ def strided_set_python(data, v, begin, end, strides):
Value data
begin : list
- Begining of the slices.
+ Beginning of the slices.
end : list
End of the slices.
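Both docstrings above take per-axis begin, end, and strides lists. In plain NumPy those map directly onto slice objects, as in this sketch of the "end" slice mode (the size-based mode is omitted):

```python
import numpy as np

def strided_slice_ref(data, begin, end, strides):
    # One slice(begin, end, stride) per axis, matching the docstring's
    # "beginning of the slices" / "end of the slices" lists.
    return data[tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))]

x = np.arange(24).reshape(4, 6)
assert strided_slice_ref(x, [0, 1], [4, 6], [2, 2]).shape == (2, 3)
```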
2 changes: 1 addition & 1 deletion python/tvm/topi/utils.py
@@ -460,7 +460,7 @@ def make_idx(b, e, s, z, i):
Returns
-------
- postion: Expr
+ position: Expr
int expression that corresponds to an array position in the selection.
"""
bc = tvm.tir.Select(s < 0, i <= e, i < b)
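make_idx above maps an array index i back to its position inside the slice selection; for a positive stride that position is (i - b) // s. A hedged Python counterpart of the idea (the TIR version also handles negative strides via the Select shown in the hunk):

```python
def make_idx_ref(b, e, s, i):
    """Position of array index i within slice(b, e, s), assuming s > 0.

    Illustrative counterpart only; returns None when i falls outside the
    selection (the out-of-bound case the tir.Select expression encodes).
    """
    if not (b <= i < e) or (i - b) % s != 0:
        return None
    return (i - b) // s

# Array indices 1, 3, 5 are selection positions 0, 1, 2 for slice(1, 6, 2).
assert [make_idx_ref(1, 6, 2, i) for i in [1, 3, 5]] == [0, 1, 2]
```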
2 changes: 1 addition & 1 deletion python/tvm/topi/vision/rcnn/proposal.py
@@ -231,7 +231,7 @@ def argsort_ir(data_buf, out_index_buf):


def nms_ir(sorted_bbox_buf, out_buf, nms_threshold):
"""Non-maximum supression.
"""Non-maximum suppression.
Parameters
----------
6 changes: 3 additions & 3 deletions python/tvm/topi/x86/batch_matmul.py
@@ -49,7 +49,7 @@ def batch_matmul(cfg, x, y, out_shape=None):
XB, M, XK = get_const_tuple(x.shape)
YB, N, YK = get_const_tuple(y.shape)
assert (XB == YB) or (YB == 1) or (XB == 1), "batch dimension doesn't match"
- assert XK == YK, "shapes of x and y is inconsistant"
+ assert XK == YK, "shapes of x and y is inconsistent"
B = te.max(XB, YB)
K = XK
if out_shape is not None:
@@ -151,7 +151,7 @@ def batch_matmul_blas_common(cfg, x, y, out_shape, lib):
3-D with shape [batch, N, K]
out_shape : tuple or None
Shape of the output
- lib : A contrib module which implements batch_matmul funtion
+ lib : A contrib module which implements batch_matmul function
cblas and mkl are supported
Returns
@@ -163,7 +163,7 @@ def batch_matmul_blas_common(cfg, x, y, out_shape, lib):
XB, M, XK = get_const_tuple(x.shape)
YB, N, YK = get_const_tuple(y.shape)
assert XB == YB, "batch dimension doesn't match"
- assert XK == YK, "shapes of x and y is inconsistant"
+ assert XK == YK, "shapes of x and y is inconsistent"
if out_shape is not None:
assert out_shape[0] == XB, "got invalid output shape"
assert out_shape[1] == M, "got invalid output shape"
2 changes: 1 addition & 1 deletion python/tvm/topi/x86/conv2d_avx_1x1.py
@@ -191,7 +191,7 @@ def _declaration_conv_nhwc_pack(cfg, Input, Filter, stride, padding, dilation, o
pad_before = [0, pad_top, pad_left, 0]
pad_after = [0, pad_down, pad_right, 0]
PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
- # todo: padding filter to accomodate the intrinsic
+ # todo: padding filter to accommodate the intrinsic

# packing the Filter to let memory access be consecutive for AVX512 intrinsic
# Done in pre-compute stage
2 changes: 1 addition & 1 deletion src/relay/ir/dataflow_matcher.cc
@@ -734,7 +734,7 @@ class PatternGrouper {
// Exit due to overlapping partitions
return;
} else if (kv.second != body) {
- // if the node isn't the ouput of the group
+ // if the node isn't the output of the group
auto node = matcher_->expr_graph_.node_map_.at(kv.first);
for (auto* output : node->outputs_) {
// and the node is used by nodes outside of the group
4 changes: 2 additions & 2 deletions src/relay/ir/indexed_graph.cc
@@ -73,7 +73,7 @@ IndexedGraph<Expr> CreateIndexedGraph(const Expr& expr) {
return std::move(graph_);
}

- /*! Default visitation pushes the parent to the child's ouputs and the child to the parent's
+ /*! Default visitation pushes the parent to the child's outputs and the child to the parent's
* inputs*/
void VisitExpr(const Expr& expr, NodePtr parent) override {
auto current = graph_.node_map_[expr];
@@ -220,7 +220,7 @@ IndexedGraph<DFPattern> CreateIndexedGraph(const DFPattern& pattern) {
return std::move(graph_);
}

- /*! Default visitation pushes the parent to the child's ouputs */
+ /*! Default visitation pushes the parent to the child's outputs */
void VisitDFPattern(const DFPattern& pattern, NodePtr parent) override {
auto current = graph_.node_map_[pattern];
if (parent) {
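Both hunks above state the same linking rule: default visitation pushes the parent onto the child's outputs and, in the Expr graph, the child onto the parent's inputs. A small Python sketch of that doubly-linked node structure (hypothetical names, not the C++ classes):

```python
class Node:
    def __init__(self, name):
        self.name = name
        self.inputs = []   # nodes this node consumes
        self.outputs = []  # nodes that consume this node

def link(child, parent):
    # The default-visitation rule from indexed_graph.cc: the parent is
    # pushed to the child's outputs and the child to the parent's inputs.
    child.outputs.append(parent)
    parent.inputs.append(child)

a, b = Node("a"), Node("b")
link(a, b)  # b uses a
assert a.outputs == [b] and b.inputs == [a]
```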
2 changes: 1 addition & 1 deletion src/relay/transforms/partition_graph.cc
@@ -177,7 +177,7 @@ class Partitioner : public MixedModeMutator {
AnnotatedRegion region = GetRegion(GetRef<Call>(call));

// TODO(@manupa-arm) : need to use the parent function (to which region
- // belongs to) name/key for the funtions that are created
+ // belongs to) name/key for the functions that are created
BaseFunc f = GetFunc(GetRef<Call>(call));

// Traverse subgraph inputs.
2 changes: 1 addition & 1 deletion src/runtime/c_runtime_api.cc
@@ -169,7 +169,7 @@ void DeviceAPI::SyncStreamFromTo(TVMContext ctx, TVMStreamHandle event_src,
// {message1}
// {message2}
// {Stack trace:} // stack traces follow by this line
- // {trace 0} // two spaces in the begining.
+ // {trace 0} // two spaces in the beginning.
// {trace 1}
// {trace 2}
//--------------------------------------------------------
2 changes: 1 addition & 1 deletion src/tir/transforms/hoist_if_then_else.cc
@@ -168,7 +168,7 @@ class HoistCandidateSelector final : public StmtExprVisitor {
// To stop hoisting if any of the block variables are used.
//
// In case we want to use hoisting in between certain passes
- // which have interdependencies of the postioning of if nodes with scope var
+ // which have interdependencies of the positioning of if nodes with scope var
// it is better to disable this section
if (support_block_scope_hosting_) {
if (IsRecordingOn()) {
2 changes: 1 addition & 1 deletion tests/python/frontend/mxnet/model_zoo/resnet.py
@@ -182,7 +182,7 @@ def resnet(
filter_list : list
Channel size of each stage
num_classes : int
- Ouput size of symbol
+ Output size of symbol
dataset : str
Dataset type, only cifar10 and imagenet supports
workspace : int
2 changes: 1 addition & 1 deletion tests/python/frontend/onnx/test_forward.py
@@ -3502,7 +3502,7 @@ def verify_roi_align(
# @tvm.testing.uses_gpu
def test_non_max_suppression():
def verify_nms(
- boxes, scores, max_ouput_boxes_per_class, iou_threshold, score_threshold, output_dims
+ boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
):
input_names = ["boxes", "scores", "max_output_boxes_per_class", "iou_threshold"]
input_nodes = [
