Skip to content

Commit

Permalink
Add a padding operator. (#95)
Browse files Browse the repository at this point in the history
The implemented operator supports padding in all dimensions and asymmetric padding. It is implemented in software only; no accelerated version is available. The code is tested through both the C++ and Python APIs. The C++ tests cover different input sizes (2D, 4D) and different padding patterns (asymmetric, symmetric); the Python API testing is mainly focused on verifying that the operator works within a graph.

Example Python code.
```python
      # consider input_tensor is a 4D tensor, we pad the last 2 dimension with 1
      out = array_ops.padding(input_tensor, [0, 0, 0, 0, 1, 1, 1, 1], "padding")
      # consider input_tensor is a 4D tensor, we pad the 2nd dimension with symmetric 
      # padding with size 1 and the 3rd dimension with asymmetric padding of size 1 and 2.
      out = array_ops.padding(input_tensor, [0, 0, 0, 0, 1, 1, 1, 2], "padding")
```

Fixes issue #94.
  • Loading branch information
mrbeann authored Jul 17, 2021
1 parent 0da9cf1 commit e54f53d
Show file tree
Hide file tree
Showing 10 changed files with 398 additions and 42 deletions.
1 change: 1 addition & 0 deletions make/Makefile.common
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ TESTS = smaug/core/tensor_test.cpp \
smaug/operators/split_op_test.cpp \
smaug/operators/reshape_op_test.cpp \
smaug/operators/repeat_op_test.cpp \
smaug/operators/padding_op_test.cpp \
smaug/operators/control_flow_ops_test.cpp \
smaug/operators/smv/smv_convolution_tiling_test.cpp \
smaug/operators/smv/smv_convolution_op_test.cpp \
Expand Down
35 changes: 19 additions & 16 deletions smaug/core/backend.cpp
Original file line number Diff line number Diff line change
@@ -1,38 +1,39 @@
#include "smaug/core/backend.h"
#include "smaug/operators/batch_norm_op.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/convolution_op.h"
#include "smaug/operators/data_op.h"
#include "smaug/operators/depthwise_convolution_op.h"
#include "smaug/operators/eltwise_add_op.h"
#include "smaug/operators/eltwise_mul_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/elu_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/inner_product_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/padding_op.h"
#include "smaug/operators/pooling_op.h"
#include "smaug/operators/relu_op.h"
#include "smaug/operators/reorder_op.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/repeat_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/sigmoid_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_convolution_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_inner_product_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_pooling_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_relu_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/smv/smv_sigmoid_op.h"
#include "smaug/operators/smv/smv_softmax_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/tanh_op.h"

namespace smaug {

Expand Down Expand Up @@ -79,6 +80,7 @@ DEF_CREATE_OP(EluOp, ReferenceBackend)
DEF_CREATE_OP(SeluOp, ReferenceBackend)
DEF_CREATE_OP(TanhOp, ReferenceBackend)
DEF_CREATE_OP(HardTanhOp, ReferenceBackend)
DEF_CREATE_OP(PaddingOp, ReferenceBackend)

DEF_CREATE_SMV_OP(ConvolutionOp)
DEF_CREATE_SMV_OP(InnerProductOp)
Expand Down Expand Up @@ -108,7 +110,9 @@ DEF_CREATE_OP(RepeatOp, SmvBackend)
DEF_CREATE_OP(FlattenOp, SmvBackend)
DEF_CREATE_OP(SwitchOp, SmvBackend)
DEF_CREATE_OP(MergeOp, SmvBackend)
DEF_CREATE_OP(PaddingOp, SmvBackend)

// for simple tracing.
namespace ref {
const unsigned kConvolutionHw = 0x0001;
const unsigned kInnerProductHw = 0x0002;
Expand Down Expand Up @@ -140,5 +144,4 @@ float* spad1;
float* spad2;
} // namespace smv


} // namespace smaug
6 changes: 4 additions & 2 deletions smaug/core/backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ template <typename Backend> class EluOp;
template <typename Backend> class SeluOp;
template <typename Backend> class TanhOp;
template <typename Backend> class HardTanhOp;
template <typename Backend> class PaddingOp;

#endif

/**
Expand Down Expand Up @@ -123,9 +125,9 @@ class ReferenceBackend {
DECL_CREATE_OP(SeluOp);
DECL_CREATE_OP(TanhOp);
DECL_CREATE_OP(HardTanhOp);
DECL_CREATE_OP(PaddingOp);

#undef DECL_CREATE_OP

};

/**
Expand Down Expand Up @@ -238,10 +240,10 @@ class SmvBackend {
DECL_CREATE_OP(FlattenOp);
DECL_CREATE_OP(SwitchOp);
DECL_CREATE_OP(MergeOp);
DECL_CREATE_OP(PaddingOp);

#undef DECL_SMV_OP
#undef DECL_CREATE_OP

};

} // namespace smaug
Expand Down
51 changes: 28 additions & 23 deletions smaug/core/network_builder.cpp
Original file line number Diff line number Diff line change
@@ -1,56 +1,57 @@
#include <iostream>
#include <fstream>
#include <fcntl.h>
#include <fstream>
#include <iostream>

#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include "smaug/core/backend.h"
#include "smaug/core/tensor.h"
#include "smaug/core/graph.pb.h"
#include "smaug/core/network.h"
#include "smaug/core/network_builder.h"
#include "smaug/core/workspace.h"
#include "smaug/core/graph.pb.h"
#include "smaug/core/node.pb.h"
#include "smaug/core/tensor.h"
#include "smaug/core/tensor.pb.h"
#include "smaug/core/types.pb.h"
#include "smaug/operators/common.h"
#include "smaug/core/workspace.h"
#include "smaug/operators/batch_norm_op.h"
#include "smaug/operators/common.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/convolution_op.h"
#include "smaug/operators/data_op.h"
#include "smaug/operators/depthwise_convolution_op.h"
#include "smaug/operators/eltwise_add_op.h"
#include "smaug/operators/eltwise_mul_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/elu_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/inner_product_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/padding_op.h"
#include "smaug/operators/pooling_op.h"
#include "smaug/operators/relu_op.h"
#include "smaug/operators/reorder_op.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/repeat_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/sigmoid_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_convolution_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_inner_product_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_pooling_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_relu_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/smv/smv_sigmoid_op.h"
#include "smaug/operators/smv/smv_softmax_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/utility/utils.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/utility/debug_stream.h"
#include "smaug/utility/utils.h"

using namespace smaug;
using namespace std;
Expand Down Expand Up @@ -263,6 +264,10 @@ static void createAndAddOperator(const NodeProto& node,
} else if (type == OpType::Tanh) {
auto op = Backend::createTanhOp(name, workspace);
network->addOperator(op);
} else if (type == OpType::Padding) {
auto op = Backend::createPaddingOp(name, workspace);
op->setPaddingSize(node.params().padding_params().padding_size());
network->addOperator(op);
} else if (type == OpType::HardTanh) {
auto op = Backend::createHardTanhOp(name, workspace);
network->addOperator(op);
Expand Down
5 changes: 5 additions & 0 deletions smaug/core/node.proto
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@ message PoolParams {
repeated int32 pool_size = 2;
}

message PaddingParams {
repeated int32 padding_size = 1;
}

message ConcatParams {
int32 concat_axis = 1;
}
Expand Down Expand Up @@ -52,6 +56,7 @@ message Params {
PoolParams pool_params = 2;
ConcatParams concat_params = 4;
SplitParams split_params = 5;
PaddingParams padding_params = 6;
}
ActivationParams act_params = 3;
}
Expand Down
3 changes: 2 additions & 1 deletion smaug/core/types.proto
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ enum DataLayout {
NCT = 16;
NTC = 32;
N = 64;
X = 127; // Elementwise
X = 127; // Elementwise
EndDataLayout = 64;
}

Expand Down Expand Up @@ -64,6 +64,7 @@ enum OpType {
GreaterEqual = 26;
Switch = 27;
Merge = 28;
Padding = 29;
}

enum PaddingType {
Expand Down
96 changes: 96 additions & 0 deletions smaug/operators/padding_op.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
#ifndef _OPERATORS_PADDING_OP_H_
#define _OPERATORS_PADDING_OP_H_

#include "smaug/core/backend.h"
#include "smaug/core/operator.h"
#include "smaug/core/tensor.h"
#include "smaug/core/workspace.h"
#include <google/protobuf/repeated_field.h>
using namespace google::protobuf;

namespace smaug {

/** \ingroup Operators
* \brief Pad a given tensor in any number of dimensions with arbitrary size.
*
* This has a software-based implementation.
*
* @tparam Backend The Backend that sets Alignment.
*/
template <typename Backend>
class PaddingOp : public Operator {
   public:
    PaddingOp(const std::string& name, Workspace* workspace)
            : Operator(name, OpType::Padding, workspace) {
        inputs.resize(kNumInputs, nullptr);
        outputs.resize(kNumOutputs, nullptr);
    }

    /**
     * Set the paddingSize of the Tensor along each dimension.
     * The paddingSize is organized as {dim0_begin, dim0_end, dim1_begin,
     * dim1_end, ...}, i.e. two entries (leading and trailing pad) per
     * input dimension.
     */
    void setPaddingSize(const RepeatedField<google::protobuf::int32>& val) {
        paddingSize.assign(val.begin(), val.end());
    }

    void setPaddingSize(std::vector<int> const& val) { paddingSize = val; }

    const std::vector<int>& getPaddingSize() const { return paddingSize; }

    /**
     * Zero-fills the output tensor, then copies the input into the interior
     * region offset by each dimension's leading padding, leaving the padded
     * border elements at zero.
     */
    void run() override {
        Tensor* input = getInput(kInput);
        Tensor* output = getOutput(kOutput);
        int ndims = input->ndims();
        const std::vector<int>& inputDims = input->getShape().dims();
        const std::vector<int>& outputDims = output->getShape().dims();
        // Zero out the entire output first; only the interior is overwritten
        // by the copy below.
        int numOutputElements = 1;
        for (int dim : outputDims)
            numOutputElements *= dim;
        std::vector<float> zeros(numOutputElements, 0);
        output->fillData(zeros.data(), zeros.size());
        // The destination origin in each dimension is that dimension's
        // leading ("begin") padding; the source is copied whole from offset 0.
        std::vector<int> paddingBegin, srcOrigin;
        paddingBegin.reserve(ndims);
        srcOrigin.reserve(ndims);
        for (int i = 0; i < ndims; i++) {
            paddingBegin.push_back(paddingSize.at(2 * i));
            srcOrigin.push_back(0);
        }
        copyTensorRegion(output, input, paddingBegin, srcOrigin, inputDims);
    }

    /**
     * Creates the output tensor. Each output dimension is the input
     * dimension grown by the sum of its begin and end padding; layout and
     * alignment are inherited from the input / backend.
     */
    void createAllTensors() override {
        Tensor* input = getInput(kInput);
        int ndims = input->ndims();
        std::vector<int> dims = input->getShape().dims();
        for (int i = 0; i < ndims; i++) {
            dims[i] += paddingSize[2 * i] + paddingSize[2 * i + 1];
        }
        TensorShape shape(
                dims, input->getShape().getLayout(), Backend::Alignment);
        Tensor* output = new Tensor(name, shape);
        workspace->addTensor(output);
        outputs.at(kOutput) = output;
    }

    /**
     * Verifies that exactly two padding values (begin/end) were supplied per
     * input dimension and that no padding value is negative.
     */
    bool validate() override {
        Tensor* input = getInput(kInput);
        int ndims = input->ndims();
        // Cast avoids a signed/unsigned comparison between size_t and int.
        if (paddingSize.size() != static_cast<size_t>(2 * ndims))
            return false;
        // Negative padding would produce an invalid output shape and an
        // out-of-bounds region copy in run().
        for (int pad : paddingSize) {
            if (pad < 0)
                return false;
        }
        return Operator::validate();
    }

    enum { kInput, kNumInputs };
    enum { kOutput, kNumOutputs };

   private:
    /** Flattened per-dimension padding: {dim0_begin, dim0_end, ...}. */
    std::vector<int> paddingSize = {};
};

} // namespace smaug

#endif
Loading

0 comments on commit e54f53d

Please sign in to comment.