Sparse fill empty rows op #7126

Closed

Changes from all commits (46 commits)
81e4c7f
Fix trt Test
codeislife99 Dec 2, 2020
8e2ce9a
Fixed stuff
Dec 2, 2020
d719346
Test TRT
Dec 2, 2020
4e160e9
Done
Dec 2, 2020
3104113
fix 0
Dec 2, 2020
24116fd
Trigger Build
Dec 3, 2020
e8b223f
Done
Dec 15, 2020
ff29e0c
SparseReshapeOp
Dec 17, 2020
fe3f7de
Remove Build Module changes
Dec 17, 2020
4fd2e57
Merge
Dec 17, 2020
3f5de52
Reset non-op changes
Dec 17, 2020
a521c1b
Remove stuff
Dec 17, 2020
04da7d4
More changes
Dec 17, 2020
c32b2dd
Op Level 3
Dec 17, 2020
fa5def3
Make Op changes only
Dec 17, 2020
dc8d1ce
Formatting Changes
Dec 17, 2020
2d48888
Only Transform changes
Dec 17, 2020
2e017fd
Correct Clang format version
Dec 17, 2020
b7000ac
Reset_to_clang-format-10
Dec 17, 2020
00e3220
Finish implementation of SparseFillEmptyRowsOp
Dec 18, 2020
ba8bfed
Remove SparseReshape
Dec 18, 2020
0ce98ef
Clang
Dec 18, 2020
c2fd071
Formats
Dec 18, 2020
ef7845e
New Tests + outputs
Dec 21, 2020
5196189
Add comments
Dec 21, 2020
af15ca7
Clang format
Dec 21, 2020
1666e8c
Black
Dec 21, 2020
d532872
Change name
Dec 21, 2020
5100d8a
Change name
Dec 21, 2020
4f733a8
Clang
Dec 21, 2020
f7526b8
PR Comments
Dec 22, 2020
f4b14ce
Explanation
Dec 22, 2020
fdf7a58
Fix sphinx
Dec 22, 2020
8a481ce
Revert 3rd party back to main
Dec 22, 2020
ec25fab
Add TF Frontend Code
Dec 30, 2020
72d5cde
Linter
Dec 30, 2020
a61647e
Update src/relay/op/tensor/transform.cc
codeislife99 Dec 30, 2020
e396514
Update src/relay/op/tensor/transform.cc
codeislife99 Dec 30, 2020
0af56dc
Update src/relay/op/tensor/transform.cc
codeislife99 Dec 30, 2020
dd801d6
Update src/relay/op/tensor/transform.cc
codeislife99 Dec 30, 2020
3a19b7f
Update include/tvm/topi/transform.h
codeislife99 Dec 30, 2020
bddf3cf
Fix
Dec 30, 2020
0ce9677
revert
Dec 30, 2020
d03ddc8
Make Docs better
Dec 30, 2020
6468415
Make descriptions better
Dec 30, 2020
6ba75bd
Typo
Dec 30, 2020
8 changes: 8 additions & 0 deletions include/tvm/relay/attrs/transform.h
@@ -401,6 +401,14 @@ struct SparseToDenseAttrs : public tvm::AttrsNode<SparseToDenseAttrs> {
}
}; // struct SparseToDenseAttrs

/*! \brief Attributes used in sparse_fill_empty_rows operator */
struct SparseFillEmptyRowsAttrs : public tvm::AttrsNode<SparseFillEmptyRowsAttrs> {
Array<Integer> dense_shape;

TVM_DECLARE_ATTRS(SparseFillEmptyRowsAttrs, "relay.attrs.SparseFillEmptyRowsAttrs") {
TVM_ATTR_FIELD(dense_shape).describe("Shape of the dense output tensor");
}
}; // struct SparseFillEmptyRowsAttrs
/*! \brief Attributes for ndarray_size operator */
struct NdarraySizeAttrs : public tvm::AttrsNode<NdarraySizeAttrs> {
DataType dtype;
8 changes: 4 additions & 4 deletions include/tvm/support/logging.h
@@ -139,10 +139,10 @@ constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE =
#define ICHECK_GE(x, y) ICHECK_BINARY_OP(_GE, >=, x, y)
#define ICHECK_EQ(x, y) ICHECK_BINARY_OP(_EQ, ==, x, y)
#define ICHECK_NE(x, y) ICHECK_BINARY_OP(_NE, !=, x, y)
#define ICHECK_NOTNULL(x) \
((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \
<< tvm::kTVM_INTERNAL_ERROR_MESSAGE << __INDENT << "Check not null: " #x \
<< ' ', \
#define ICHECK_NOTNULL(x) \
((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \
<< tvm::kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT \
<< "Check not null: " #x << ' ', \
(x) : (x)) // NOLINT(*)

/*! \brief The diagnostic level, controls the printing of the message. */
88 changes: 88 additions & 0 deletions include/tvm/topi/transform.h
@@ -1386,6 +1386,94 @@ inline Array<Tensor> meshgrid(const Array<Tensor>& inputs, const std::string& in
return result;
}

/*!
* \brief Fill empty rows of a sparse tensor with default values
*
* \param sparse_indices Indices where values of the dense tensor exist
* \param sparse_values Values at the above indices respectively
* \param default_value Default value to be used for empty rows
* \param dense_shape Dense shape of the sparse tensor
* \param name The name of the operation
* \param tag The tag to mark the operation
*
* \return A Tensor whose op member is the SparseFillEmptyRows operation
*/
inline Array<Tensor> SparseFillEmptyRows(const Tensor& sparse_indices, const Tensor& sparse_values,
const Tensor& default_value,
const Array<Integer>& dense_shape,
const std::string name = "T_sparse_fill_empty_rows",
std::string tag = kInjective) {
Array<Tensor> result;
Array<PrimExpr> sp_ordered_output_shape;
sp_ordered_output_shape.push_back(dense_shape[0] + sparse_indices->shape[0]);
if (sparse_indices->shape.size() > 1) {
sp_ordered_output_shape.push_back(sparse_indices->shape[1]);
}
auto empty_row_indicator =
compute(Array<PrimExpr>{dense_shape[0]}, [&](const Array<Var>& indices) {
PrimExpr ret = PrimExpr(Bool(1));
for (int i = 0; i < GetConstInt(sparse_indices->shape[0]); ++i) {
PrimExpr sparse_index;
if (sparse_indices->shape.size() == 1) {
sparse_index = sparse_indices[i];
} else {
sparse_index = sparse_indices[i][0];
}
ret = if_then_else(sparse_index == indices[0], PrimExpr(Bool(0)), ret);
}
return ret;
});
result.push_back(compute(
sp_ordered_output_shape,
[&](const Array<Var>& indices) {
PrimExpr ret = -1;
ret = if_then_else(indices[0] < sparse_indices->shape[0], sparse_indices(indices), ret);
PrimExpr empty_row_count = 0;
for (int i = 0; i < static_cast<int>(dense_shape[0]); ++i) {
empty_row_count =
if_then_else(empty_row_indicator[i], empty_row_count + 1, empty_row_count);
PrimExpr at_correct_index =
(indices[0] == (sparse_indices->shape[0] + empty_row_count - 1));
PrimExpr condition = at_correct_index && empty_row_indicator[i];

ret = if_then_else(condition, i, ret);
if (indices.size() > 1) {
ret = if_then_else(condition && indices[1] > 0, 0, ret);
}
}
return ret;
},
name, tag));
result.push_back(empty_row_indicator);
result.push_back(compute(
Array<PrimExpr>{sp_ordered_output_shape[0]},
[&](const Array<Var>& indices) {
PrimExpr ret = -1;
ret = if_then_else(indices[0] < sparse_values->shape[0], sparse_values(indices), ret);
PrimExpr empty_row_count = 0;
for (int i = 0; i < static_cast<int>(dense_shape[0]); ++i) {
empty_row_count =
if_then_else(empty_row_indicator[i], empty_row_count + 1, empty_row_count);
PrimExpr condition =
(indices[0] == sparse_values->shape[0] + empty_row_count - 1) && empty_row_count > 0;
ret = if_then_else(condition, default_value[0], ret);
}
return ret;
},
name, tag));
result.push_back(compute(
Array<PrimExpr>{1},
[&](const Array<Var>& indices) {
PrimExpr non_empty_rows = 0;
for (int i = 0; i < static_cast<int>(dense_shape[0]); ++i) {
non_empty_rows = if_then_else(empty_row_indicator[i], non_empty_rows, non_empty_rows + 1);
}
return non_empty_rows;
},
name, tag));
return result;
}
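
For reference, here is a NumPy sketch (illustrative only, not part of this PR; the helper name and the assumption of 2-D sparse_indices are editorial) of the semantics the SparseFillEmptyRows compute above implements, including the -1 padding that a later strided_slice is expected to remove:

import numpy as np


def sparse_fill_empty_rows_ref(sparse_indices, sparse_values, default_value, dense_shape):
    # Outputs are padded to N + dense_shape[0] rows because the true number of
    # appended rows depends on the runtime values of sparse_indices.
    n = sparse_indices.shape[0]
    num_rows = dense_shape[0]
    occupied = np.zeros(num_rows, dtype=bool)
    occupied[sparse_indices[:, 0]] = True
    empty_row_indicator = ~occupied

    new_indices = np.full((n + num_rows, sparse_indices.shape[1]), -1, dtype=sparse_indices.dtype)
    new_values = np.full(n + num_rows, -1, dtype=sparse_values.dtype)
    new_indices[:n] = sparse_indices
    new_values[:n] = sparse_values

    # Append one entry per empty row: column 0 of that row, holding the default value.
    k = n
    for row in np.nonzero(empty_row_indicator)[0]:
        new_indices[k, 0] = row
        new_indices[k, 1:] = 0
        new_values[k] = default_value[0]
        k += 1

    non_empty_rows = np.array([int(occupied.sum())])
    return new_indices, empty_row_indicator, new_values, non_empty_rows

On the example used in the Python docstrings below (indices [[0, 1], [0, 3], [2, 0], [3, 1]], values [1, 2, 3, 4], default value 10, dense_shape [5, 6]) this sketch reproduces the padded outputs shown there, with non_empty_rows equal to [3].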

/*!
* \brief Transform the layout according to \p src_layout and \p dst_layout
* \param src the source input.
49 changes: 49 additions & 0 deletions python/tvm/relay/frontend/tensorflow.py
@@ -1365,6 +1365,54 @@ def _impl(inputs, attr, params, mod):
return _impl


def _sparse_fill_empty_rows():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 4, "There should be 4 input tensors"

indices_tensor = _infer_value(inputs[0], params, mod).asnumpy()
values_tensor = _infer_value(inputs[1], params, mod).asnumpy()
dense_shape_tensor = _infer_value(inputs[2], params, mod).asnumpy()
default_value_tensor = _infer_value(inputs[3], params, mod).asnumpy().reshape(1)

indices_data = _expr.const(indices_tensor, indices_tensor.dtype)
values_data = _expr.const(values_tensor, values_tensor.dtype)
default_value_data = _expr.const(default_value_tensor, default_value_tensor.dtype)

(
new_sparse_indices,
empty_row_indicator,
new_sparse_values,
non_empty_rows,
) = get_relay_op("sparse_fill_empty_rows")(
indices_data, values_data, default_value_data, list(dense_shape_tensor)
)
first_column = get_relay_op("split")(new_sparse_indices, indices_tensor.shape[1], axis=1)
sorted_indices = _op.argsort(_op.squeeze(first_column[0]))

final_sparse_indices = _op.strided_slice(
_op.take(new_sparse_indices, sorted_indices, axis=0),
begin=_op.concatenate([non_empty_rows, _expr.const([0])], 0),
end=[-1, -1],
strides=[1, 1],
slice_mode="size",
)

final_sparse_values = _op.strided_slice(
_op.take(new_sparse_values, sorted_indices),
begin=non_empty_rows,
end=_expr.const([-1]),
slice_mode="size",
)

return (
final_sparse_indices,
final_sparse_values,
empty_row_indicator,
)

return _impl
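
For context, a small TensorFlow snippet (illustrative only, not part of this PR) showing the op this converter targets. tf.sparse.fill_empty_rows returns row-ordered indices with no padding, which is why the converter above argsorts on the first index column and then uses strided_slice with the non_empty_rows count to drop the -1-padded rows:

import tensorflow as tf

sp = tf.sparse.SparseTensor(
    indices=[[0, 1], [0, 3], [2, 0], [3, 1]],
    values=[1, 2, 3, 4],
    dense_shape=[5, 6],
)
# Returns (filled SparseTensor, empty_row_indicator); the filled tensor contains
# one extra [row, 0] entry with the default value for each previously empty row,
# and its indices are ordered by row with no -1 padding.
filled, empty_row_indicator = tf.sparse.fill_empty_rows(sp, default_value=10)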


def _bias_add():
def _impl(inputs, attr, params, mod):
# Must expand for proper broadcasting in NCHW.
@@ -2422,6 +2470,7 @@ def _impl(inputs, attr, params, mod):
"SpaceToBatchND": _space_to_batch_nd(),
"SpaceToDepth": _space_to_depth(),
"SparseToDense": _sparse_to_dense(),
"SparseFillEmptyRows": _sparse_fill_empty_rows(),
"SparseTensorDenseMatMul": _sparse_tensor_dense_matmul(),
"Split": _split(False),
"SplitV": _split(True),
1 change: 1 addition & 0 deletions python/tvm/relay/op/_transform.py
@@ -63,6 +63,7 @@
_reg.register_injective_schedule("sparse_to_dense")
_reg.register_injective_schedule("matrix_set_diag")
_reg.register_injective_schedule("adv_index")
_reg.register_injective_schedule("sparse_fill_empty_rows")

# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)
80 changes: 80 additions & 0 deletions python/tvm/relay/op/transform.py
@@ -1320,3 +1320,83 @@ def adv_index(inputs):
Output tensor.
"""
return _make.adv_index(Tuple(inputs))


def sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape):
"""
Fill the first column of each empty row in a sparse array with a default value.
It returns a TupleWrapper with four outputs.

Parameters
----------
sparse_indices : relay.Expr
A 2-D tensor[N, n_dim] of integers containing the locations of sparse values, where N is
the number of sparse values and n_dim is the number of dimensions of dense_shape

sparse_values : relay.Expr
A 1-D tensor[N] containing the sparse values for the sparse indices.

default_value : relay.Expr
A 1-D tensor containing the default value for the remaining locations.

dense_shape : list of int
A list of integers. Shape of the dense output tensor.

Returns
-------
new_sparse_indices : relay.Expr
A 2-D tensor[N + dense_shape[0], n_dim] of integers containing the locations of the new
sparse values, where N is the number of input sparse values. Rows that turn out to be
irrelevant are filled with -1 and are meant to be sliced off by a subsequent op, since the
real number of rows in new_sparse_indices depends on the input values.

empty_row_indicator : relay.Expr
A 1-D boolean tensor[dense_shape[0]] indicating whether each row of the dense tensor is empty

new_sparse_values : relay.Expr
A 1-D tensor[N + dense_shape[0]] containing the sparse values for the new sparse indices.
Entries that will be discarded by the following strided_slice op are filled with -1, again
because the real number of rows depends on the input values.

non_empty_rows : relay.Expr
A 1-D tensor containing the number of non-empty rows in sparse_indices. This value is used
to slice off the irrelevant entries (filled with -1) in new_sparse_values and
new_sparse_indices

Examples
--------
.. code-block:: python

sparse_indices = [[0, 1],
[0, 3],
[2, 0],
[3, 1]]
sparse_values = [1, 2, 3, 4]
default_value = [10]
dense_shape = [5, 6]
new_sparse_indices, empty_row_indicator, new_sparse_values, non_empty_rows = \
relay.sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape)
new_sparse_indices = [[0, 1],
[0, 3],
[2, 0],
[3, 1],
[1, 0],
[4, 0],
[-1, -1],
[-1, -1],
[-1, -1]]

empty_row_indicator = [False, True, False, False, True]

new_sparse_values = [1, 2, 3, 4, 10, 10, -1, -1, -1]

non_empty_rows = [3]

"""
return TupleWrapper(
_make.sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape), 4
)
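
As a usage sketch (editorial, not part of this PR), the new Relay op can be built with the docstring's example data; this assumes the op is executable through the default Relay executor:

import numpy as np
from tvm import relay

sparse_indices = relay.const(np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype="int64"))
sparse_values = relay.const(np.array([1, 2, 3, 4], dtype="int64"))
default_value = relay.const(np.array([10], dtype="int64"))

# TupleWrapper with four fields: new_sparse_indices, empty_row_indicator,
# new_sparse_values, non_empty_rows.
out = relay.sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, [5, 6])

func = relay.Function([], out.astuple())
results = relay.create_executor().evaluate(func)()
# results[3] should hold [3]: rows 0, 2 and 3 of the five dense rows are non-empty.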
78 changes: 78 additions & 0 deletions python/tvm/topi/transform.py
@@ -931,3 +931,81 @@ def adv_index(data, indices):
Output tensor
"""
return cpp.adv_index(data, indices)


def sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape):
"""
Fill the first column of each empty row in a sparse array with a default value.
It returns four output tensors.

Parameters
----------
sparse_indices : tvm.te.Tensor
A 2-D tensor[N, n_dim] of integers containing the locations of sparse values, where N is
the number of sparse values and n_dim is the number of dimensions of dense_shape

sparse_values : tvm.te.Tensor
A 1-D tensor[N] containing the sparse values for the sparse indices.

default_value : tvm.te.Tensor
A 1-D tensor containing the default value for the remaining locations.

dense_shape : list of int
A list of integers. Shape of the dense output tensor.

Returns
-------
new_sparse_indices : tvm.te.Tensor
A 2-D tensor[N + dense_shape[0], n_dim] of integers containing the locations of the new
sparse values, where N is the number of input sparse values. Rows that turn out to be
irrelevant are filled with -1 and are meant to be sliced off by a subsequent op, since the
real number of rows in new_sparse_indices depends on the input values.

empty_row_indicator : tvm.te.Tensor
A 1-D boolean tensor[dense_shape[0]] indicating whether each row of the dense tensor is empty

new_sparse_values : tvm.te.Tensor
A 1-D tensor[N + dense_shape[0]] containing the sparse values for the new sparse indices.
Entries that will be discarded by the following strided_slice op are filled with -1, again
because the real number of rows depends on the input values.

non_empty_rows : tvm.te.Tensor
A 1-D tensor containing the number of non-empty rows in sparse_indices. This value is used
to slice off the irrelevant entries (filled with -1) in new_sparse_values and
new_sparse_indices

Examples
--------
.. code-block:: python

sparse_indices = [[0, 1],
[0, 3],
[2, 0],
[3, 1]]
sparse_values = [1, 2, 3, 4]
default_value = [10]
dense_shape = [5, 6]
new_sparse_indices, empty_row_indicator, new_sparse_values, non_empty_rows = \
topi.sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape)
new_sparse_indices = [[0, 1],
[0, 3],
[2, 0],
[3, 1],
[1, 0],
[4, 0],
[-1, -1],
[-1, -1],
[-1, -1]]

empty_row_indicator = [False, True, False, False, True]

new_sparse_values = [1, 2, 3, 4, 10, 10, -1, -1, -1]

non_empty_rows = [3]

"""
return cpp.sparse_fill_empty_rows(sparse_indices, sparse_values, default_value, dense_shape)
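
A hedged sketch of exercising the TOPI entry point directly at the TE level; it is untested here and assumes sparse_fill_empty_rows is exported under tvm.topi like the other transform ops:

import tvm
from tvm import te, topi

indices = te.placeholder((4, 2), dtype="int64", name="sparse_indices")
values = te.placeholder((4,), dtype="int64", name="sparse_values")
default_value = te.placeholder((1,), dtype="int64", name="default_value")

# Four output tensors: new_sparse_indices, empty_row_indicator,
# new_sparse_values, non_empty_rows.
outs = topi.sparse_fill_empty_rows(indices, values, default_value, [5, 6])

s = te.create_schedule([o.op for o in outs])
f = tvm.build(s, [indices, values, default_value] + list(outs), target="llvm")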