
Commit 47caec4

Fix the wrong axis order, try to move all logic into OnnxConverter::Convert()

daquexian committed Nov 2, 2019
1 parent e0dd8ab commit 47caec4
Showing 3 changed files with 64 additions and 20 deletions.
10 changes: 10 additions & 0 deletions dnnlibrary/ModelBuilderImpl.cpp
@@ -718,6 +718,16 @@ ModelBuilder::Index ModelBuilder::AddDepthWiseConv(
output_quant_info);
}

+ ModelBuilder::Index ModelBuilder::AddConv(
+     const std::string &input_name, const std::string &weight_name,
+     const dnn::optional<std::string> &bias_name, const std::vector<int32_t> paddings,
+     const std::vector<int32_t> strides, const std::string &output_name,
+     const dnn::optional<QuantInfo> &output_quant_info) {
+     return AddConv(input_name, weight_name, bias_name, paddingLeft,
+                    paddingRight, paddingTop, paddingBottom, strideX, strideY,
+                    activation, output_name, output_quant_info);
+ }

ModelBuilder::Index ModelBuilder::AddConv(
const std::string &input_name, int32_t strideX, int32_t strideY,
int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
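The new overload above is meant to forward vector-style pads and strides to the existing scalar-parameter AddConv. A minimal, self-contained sketch of that delegation pattern, assuming NNAPI's {left, right, top, bottom} pad order and {width, height} stride order; the function names and parameter lists here are illustrative, not DNNLibrary's real signatures:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for the existing scalar-parameter overload; the real
// ModelBuilder::AddConv also takes weight/bias names, activation and
// quantization info. Declared only, since this is just a sketch.
int32_t AddConvScalar(const std::string &input, int32_t padding_left,
                      int32_t padding_right, int32_t padding_top,
                      int32_t padding_bottom, int32_t stride_x,
                      int32_t stride_y, const std::string &output);

// Sketch of a vector-style wrapper: unpack pads = {left, right, top, bottom}
// and strides = {width, height} (NNAPI order) and delegate to the scalar
// overload.
int32_t AddConvVec(const std::string &input, const std::vector<int32_t> &pads,
                   const std::vector<int32_t> &strides,
                   const std::string &output) {
    return AddConvScalar(input, pads[0], pads[1], pads[2], pads[3],
                         strides[0], strides[1], output);
}
```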
41 changes: 26 additions & 15 deletions tools/onnx2daq/OnnxConverter.cpp
@@ -682,34 +682,45 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
}
if (op == "Conv") {
VLOG(5) << "Start converting Conv";
- const auto strides = helper.get("strides", vector<int>{1, 1});
- const auto pads = helper.get("pads", vector<int>{0, 0, 0, 0});
- const auto dilations = helper.get("dilations", vector<int>{1, 1});
- CHECK_EQ(pads.size(), 4ul);
- CHECK_EQ(strides.size(), 2ul);
- CHECK_EQ(dilations.size(), 2ul);
+ // onnx strides are in the order height, width
+ // while nnapi strides are in the order width, height
+ const auto onnx_strides = helper.get("strides", vector<int>{1, 1});
+ // onnx pads are in the order top, left, bottom, right
+ // while nnapi pads is in the order left, right, top, bottom
+ const auto onnx_pads = helper.get("pads", vector<int>{0, 0, 0, 0});
+ // onnx dilations is in the order height, width
+ // while nnapi dilations are in the order width, height
+ const auto onnx_dilations = helper.get("dilations", vector<int>{1, 1});
+ CHECK_EQ(onnx_pads.size(), 4ul);
+ CHECK_EQ(onnx_strides.size(), 2ul);
+ CHECK_EQ(onnx_dilations.size(), 2ul);
+ const decltype(onnx_strides) nnapi_strides{onnx_strides[1], onnx_strides[0]};
+ const decltype(onnx_pads) nnapi_pads{onnx_pads[1], onnx_pads[3], onnx_pads[0], onnx_pads[2]};
+ const decltype(onnx_dilations) nnapi_dilations{onnx_dilations[1], onnx_dilations[0]};
const auto group = helper.get("group", 1);
dnn::optional<string> bias_name;
if (node.input_size() >= 3) {
bias_name = m(node.input(2));
}

const auto ori_weight_name = m(node.input(1));
- AddConv(m(node.input(0)), strides, pads, dilations, group,
+ AddConv(m(node.input(0)), nnapi_strides, nnapi_pads, nnapi_dilations, group,
ori_weight_name, bias_name, m(node.output(0)));
VLOG(5) << "Converting Conv completed";
} else if (op == "AveragePool" || op == "MaxPool" ||
op == "GlobalAveragePool" || op == "GlobalMaxPool") {
VLOG(5) << "Start converting Pool";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
- vector<int> strides, pads, kernel_shape;
+ vector<int> nnapi_strides, nnapi_pads, kernel_shape;
if (op == "AveragePool" || op == "MaxPool") {
- strides = helper.get("strides", vector<int>{1, 1});
- pads = helper.get("pads", vector<int>{0, 0, 0, 0});
kernel_shape = helper.get("kernel_shape", vector<int>{0, 0});
const auto count_include_pad =
helper.get("count_include_pad", 0);
+ const auto onnx_strides = helper.get("strides", vector<int>{1, 1});
+ const auto onnx_pads = helper.get("pads", vector<int>{0, 0, 0, 0});
+ nnapi_strides = {onnx_strides[1], onnx_strides[0]};
+ nnapi_pads = {onnx_pads[1], onnx_pads[3], onnx_pads[0], onnx_pads[2]};
if (count_include_pad == 1) {
throw std::invalid_argument(
"count_include_pad == 1 is not supported");
@@ -723,14 +734,14 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
throw std::invalid_argument("auto_pad is not supported");
}
} else {
- strides = {0, 0};
- pads = {0, 0, 0, 0};
+ nnapi_strides = {0, 0};
+ nnapi_pads = {0, 0, 0, 0};
kernel_shape = {-1, -1}; // -1 for global
}
- CHECK_EQ(pads.size(), 4ul);
+ CHECK_EQ(nnapi_pads.size(), 4ul);
CHECK_EQ(kernel_shape.size(), 2ul);
- CHECK_EQ(strides.size(), 2ul);
- AddLayerPool(op, input_name, kernel_shape, pads, strides,
+ CHECK_EQ(nnapi_strides.size(), 2ul);
+ AddLayerPool(op, input_name, kernel_shape, nnapi_pads, nnapi_strides,
output_name);
VLOG(5) << "Converting Pool completed";
} else if (op == "Relu") {
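As a reading aid for the Conv/Pool hunks above: ONNX stores 2D spatial attributes in (height, width) order with pads as {top, left, bottom, right}, while NNAPI expects (width, height) and pads as {left, right, top, bottom} — exactly what the new nnapi_* initializers encode. A minimal sketch of that remapping, with hypothetical helper names (the real code inlines it in OnnxConverter::Convert()):

```cpp
#include <cassert>
#include <vector>

// Illustrative helpers only; DNNLibrary performs this remap inline.
std::vector<int> OnnxToNnapiHW(const std::vector<int> &onnx) {
    // ONNX strides/dilations: {height, width} -> NNAPI: {width, height}
    return {onnx[1], onnx[0]};
}

std::vector<int> OnnxToNnapiPads(const std::vector<int> &onnx) {
    // ONNX pads: {top, left, bottom, right} -> NNAPI: {left, right, top, bottom}
    return {onnx[1], onnx[3], onnx[0], onnx[2]};
}

int main() {
    const std::vector<int> onnx_pads{/*top=*/1, /*left=*/2, /*bottom=*/3, /*right=*/4};
    assert((OnnxToNnapiPads(onnx_pads) == std::vector<int>{2, 4, 1, 3}));

    const std::vector<int> onnx_strides{/*height=*/2, /*width=*/1};
    assert((OnnxToNnapiHW(onnx_strides) == std::vector<int>{1, 2}));
    return 0;
}
```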
33 changes: 28 additions & 5 deletions tools/onnx2daq/OnnxConverterImpl.cpp
@@ -9,8 +9,12 @@ using Shape = Shaper::Shape;

namespace dnn {
void OnnxConverter::AddConv(const string &input_name,
+ // Strides here are in the order: width, height
const std::vector<int> &strides,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
const std::vector<int> &pads,
+ // Dilations here are in the order: width, height
const std::vector<int> &dilations, int group,
const string &ori_weight_name,
const dnn::optional<std::string> &bias_name,
@@ -21,18 +25,22 @@ void OnnxConverter::AddConv(const string &input_name,
throw std::invalid_argument(
"Both dilations and strides > 1 is not supported for now");
}
+ if (!(pads[0] == pads[1] && pads[1] == pads[2] && pads[2] == pads[3])) {
+     throw std::invalid_argument(
+         "Both dilations and asymmetric pads is not supported for now");
+ }
VLOG(5) << "Dilations of conv: " << dilations << ", converting..";
const auto s2b_name = input_name + "_s2b";
const auto im_name = input_name + "_conv_imm";
const auto b2s_name = input_name + "_b2s";
std::vector<int> new_pads = pads;
const auto input_shape = shaper_[input_name];
- new_pads[1] = (input_shape[1] + pads[1] + (dilations[0] - 1)) /
+ new_pads[1] = (input_shape[2] + pads[1] + (dilations[0] - 1)) /
      dilations[0] * dilations[0] -
- input_shape[1];
- new_pads[3] = (input_shape[2] + pads[3] + (dilations[1] - 1)) /
-     dilations[1] * dilations[1] -
      input_shape[2];
+ new_pads[3] = (input_shape[1] + pads[3] + (dilations[1] - 1)) /
+     dilations[1] * dilations[1] -
+     input_shape[1];
VLOG(5) << input_shape << ", " << pads << ", " << dilations << ", "
<< new_pads;
// Why "AllowShortBlocksOnASingleLine: false" doesn't work on it?
@@ -57,7 +65,7 @@ void OnnxConverter::AddConv(const string &input_name,
const std::vector<int32_t> starts{0, 0, 0, 0};
const std::vector<int32_t> ends{
static_cast<int32_t>(b2s_shape[0]),
- static_cast<int32_t>(b2s_shape[1]) - (new_pads[1] - pads[0]),
+ static_cast<int32_t>(b2s_shape[1]) - (new_pads[1] - pads[1]),
static_cast<int32_t>(b2s_shape[2]) - (new_pads[3] - pads[3]),
static_cast<int32_t>(b2s_shape[3])};
const std::vector<int32_t> strides_in_ss{1, 1, 1, 1};
@@ -92,7 +100,10 @@ void OnnxConverter::AddConv(const string &input_name,

void OnnxConverter::AddLayerPool(css &op, css &input_name,
const std::vector<int> &kernel_shape,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
const std::vector<int> &pads,
+ // Strides here are in the order: width, height
const std::vector<int> &strides,
css &output_name) {
if (op == "AveragePool" || op == "GlobalAveragePool") {
@@ -108,7 +119,10 @@ void OnnxConverter::AddLayerPool(css &op, css &input_name,
void OnnxConverter::AddLayerConvImpl(const std::string &input,
const std::string &weight,
const dnn::optional<std::string> &bias,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
const std::vector<int32_t> &pads,
+ // Strides here are in the order: width, height
const std::vector<int32_t> &strides,
const std::string &output) {
const auto activation = FindActivation(model_proto_, output);
@@ -161,6 +175,9 @@ void OnnxConverter::AddLayerConvImpl(const std::string &input,

void OnnxConverter::AddLayerAvePoolImpl(
const std::string &input, const std::vector<int32_t> &kernel_shape,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
+ // Strides here are in the order: width, height
const std::vector<int32_t> &pads, const std::vector<int32_t> &strides,
const std::string &output) {
const auto activation = FindActivation(model_proto_, output);
@@ -188,6 +205,9 @@ void OnnxConverter::AddLayerAvePoolImpl(

void OnnxConverter::AddLayerMaxPoolImpl(
const std::string &input, const std::vector<int32_t> &kernel_shape,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
+ // Strides here are in the order: width, height
const std::vector<int32_t> &pads, const std::vector<int32_t> &strides,
const std::string &output) {
const auto activation = FindActivation(model_proto_, output);
@@ -370,7 +390,10 @@ void OnnxConverter::AddLayerConcat(const std::vector<std::string> &inputs,

void OnnxConverter::AddLayerDepthwiseConvImpl(
const std::string &input, const std::string &weight,
+ // The order of pads here is the same as nnapi:
+ // left, right, top, bottom
const dnn::optional<std::string> &bias, const std::vector<int32_t> &pads,
+ // Strides here are in the order: width, height
const std::vector<int32_t> &strides, int32_t depth_multiplier,
const std::string &output) {
const auto activation = FindActivation(model_proto_, output);
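For the dilation path in OnnxConverter::AddConv above: the _s2b and _b2s intermediates indicate that a dilated convolution is emulated via space-to-batch / convolution / batch-to-space, and the code bumps new_pads[1] and new_pads[3] so that width + new_pads[1] and height + new_pads[3] become multiples of the corresponding dilation. The fix pairs input_shape[2] (width, NHWC) with dilations[0] and pads[1], and input_shape[1] (height) with dilations[1] and pads[3], matching the NNAPI orders now passed in. A minimal sketch of the corrected padding adjustment, under those assumptions:

```cpp
#include <cassert>
#include <vector>

// Sketch of the pad adjustment done before the space-to-batch step, assuming
// an NHWC input shape {N, H, W, C}, NNAPI pad order {left, right, top, bottom}
// and dilations in {width, height} order, as in the fixed code.
std::vector<int> AdjustPadsForDilation(const std::vector<int> &input_shape,
                                       const std::vector<int> &pads,
                                       const std::vector<int> &dilations) {
    std::vector<int> new_pads = pads;
    const int height = input_shape[1];
    const int width = input_shape[2];
    // Round (width + right pad) up to a multiple of the width dilation,
    // and (height + bottom pad) up to a multiple of the height dilation.
    new_pads[1] = (width + pads[1] + dilations[0] - 1) / dilations[0] * dilations[0] - width;
    new_pads[3] = (height + pads[3] + dilations[1] - 1) / dilations[1] * dilations[1] - height;
    return new_pads;
}

int main() {
    // A 1x7x9x3 input with no padding and dilation {2, 2} needs one extra
    // column (right) and one extra row (bottom) so both extents become even.
    const auto adjusted = AdjustPadsForDilation({1, 7, 9, 3}, {0, 0, 0, 0}, {2, 2});
    assert((adjusted == std::vector<int>{0, 1, 0, 1}));
    return 0;
}
```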
