Skip to content

Commit

Permalink
'converter_simple' should not exist; all related logic should be implemented in Convert()
Browse files Browse the repository at this point in the history
  • Loading branch information
daquexian committed Nov 5, 2019
1 parent f46f008 commit 0895bb4
Show file tree
Hide file tree
Showing 4 changed files with 48 additions and 44 deletions.
41 changes: 20 additions & 21 deletions include/tools/onnx2daq/OnnxConverter.h
Original file line number Diff line number Diff line change
Expand Up @@ -110,26 +110,23 @@ class OnnxConverter {
void SetIdentity(const std::string &input_name,
const std::string &output_name);
// OnnxConverter auto generated methods start
void AddLayerCONV_2DImpl(const std::string &input,
const std::string &weight,
const dnn::optional<std::string> &bias,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y,
const std::string &output);
void AddLayerAVERAGE_POOL_2DImpl(const std::string &input,
int32_t padding_left,
int32_t padding_right, int32_t padding_top,
int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width,
int32_t kernel_height,
const std::string &output);
void AddLayerMAX_POOL_2DImpl(const std::string &input, int32_t padding_left,
void AddLayerCONV_2D(const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y,
const std::string &output);
void AddLayerAVERAGE_POOL_2D(const std::string &input, int32_t padding_left,
int32_t padding_right, int32_t padding_top,
int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width,
int32_t kernel_height,
const std::string &output);
void AddLayerMAX_POOL_2D(const std::string &input, int32_t padding_left,
int32_t padding_right, int32_t padding_top,
int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width,
int32_t kernel_height, const std::string &output);
void AddLayerRELU(const std::string &input, const std::string &output);
void AddLayerSOFTMAX(const std::string &input, float beta,
const std::string &output);
Expand All @@ -141,12 +138,14 @@ class OnnxConverter {
const std::string &output);
void AddLayerCONCATENATION(const std::vector<std::string> &inputs,
int32_t axis, const std::string &output);
void AddLayerDEPTHWISE_CONV_2DImpl(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, int32_t depth_multiplier,
const std::string &output);
void AddLayerDEPTHWISE_CONV_2D(const std::string &input,
const std::string &weight,
const dnn::optional<std::string> &bias,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y,
int32_t depth_multiplier,
const std::string &output);
void AddLayerBATCH_TO_SPACE_ND(const std::string &input,
const std::vector<int32_t> &block_sizes,
const std::string &output);
Expand Down
5 changes: 1 addition & 4 deletions ops.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@
needed_by_shaper: true
fused: true
support_quant_asymm: true
converter_simple: false
api: 27
-
input:
Expand Down Expand Up @@ -93,7 +92,6 @@
nnapi: AVERAGE_POOL_2D
shaper: Pool
fused: true
converter_simple: false
# builder_simple: false
support_quant_asymm: true
api: 27
Expand Down Expand Up @@ -143,7 +141,6 @@
nnapi: MAX_POOL_2D
shaper: Pool
fused: true
converter_simple: false
# builder_simple: false
support_quant_asymm: true
api: 27
Expand Down Expand Up @@ -244,7 +241,7 @@
cpp_type: int32_t
fused: true
support_quant_asymm: true
converter_simple: false
# converter_simple: false
api: 27
-
input:
Expand Down
28 changes: 17 additions & 11 deletions tools/onnx2daq/OnnxConverter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -717,13 +717,13 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
const auto &onnx_weight = onnx_tensors_.at(ori_weight_name);
if (group == 1) {
VLOG(5) << "Vanilla conv";
AddLayerCONV_2DImpl(input_name, ori_weight_name, bias_name,
onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1],
onnx_strides[0], output_name);
AddLayerCONV_2D(input_name, ori_weight_name, bias_name,
onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
output_name);
} else if (onnx_weight.shape[1] == 1) { // depthwise
VLOG(5) << "Depthwise conv";
AddLayerDEPTHWISE_CONV_2DImpl(
AddLayerDEPTHWISE_CONV_2D(
input_name, ori_weight_name, bias_name, onnx_pads[1],
onnx_pads[3], onnx_pads[0], onnx_pads[2], onnx_strides[1],
onnx_strides[0], onnx_weight.shape[0] / group, output_name);
Expand All @@ -737,8 +737,8 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
VLOG(5) << "Start converting Pool";
const auto input_name = m(node.input(0));
const auto output_name = m(node.output(0));
vector<int> nnapi_strides, nnapi_pads, kernel_shape;
if (op == "AveragePool" || op == "MaxPool") {
vector<int> nnapi_strides, nnapi_pads, kernel_shape;
kernel_shape = helper.get("kernel_shape", vector<int>{0, 0});
const auto count_include_pad =
helper.get("count_include_pad", 0);
Expand All @@ -765,20 +765,26 @@ void OnnxConverter::Convert(const ONNX_NAMESPACE::ModelProto &model_proto,
CHECK_EQ(kernel_shape.size(), 2ul);
CHECK_EQ(nnapi_strides.size(), 2ul);
if (op == "AveragePool") {
AddLayerAVERAGE_POOL_2DImpl(
AddLayerAVERAGE_POOL_2D(
input_name, onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
kernel_shape[1], kernel_shape[0], output_name);
} else {
AddLayerMAX_POOL_2DImpl(
AddLayerMAX_POOL_2D(
input_name, onnx_pads[1], onnx_pads[3], onnx_pads[0],
onnx_pads[2], onnx_strides[1], onnx_strides[0],
kernel_shape[1], kernel_shape[0], output_name);
}
} else {
// -1 means global
AddLayerAVERAGE_POOL_2DImpl(input_name, 0, 0, 0, 0, 1, 1, -1,
-1, output_name);
if (op == "GlobalAveragePool") {
AddLayerAVERAGE_POOL_2D(
input_name, 0, 0, 0, 0, 1, 1, shaper_[input_name][1],
shaper_[input_name][0], output_name);
} else {
AddLayerMAX_POOL_2D(input_name, 0, 0, 0, 0, 1, 1,
shaper_[input_name][1],
shaper_[input_name][0], output_name);
}
}
VLOG(5) << "Converting Pool completed";
} else if (op == "Relu") {
Expand Down
18 changes: 10 additions & 8 deletions tools/onnx2daq/OnnxConverterImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -106,11 +106,13 @@ namespace dnn {
// }

// OnnxConverter auto generated methods start
void OnnxConverter::AddLayerCONV_2DImpl(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y, const std::string &output) {
void OnnxConverter::AddLayerCONV_2D(const std::string &input,
const std::string &weight,
const dnn::optional<std::string> &bias,
int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom,
int32_t stride_x, int32_t stride_y,
const std::string &output) {
const auto activation = FindActivation(model_proto_, output);

{
Expand Down Expand Up @@ -161,7 +163,7 @@ void OnnxConverter::AddLayerCONV_2DImpl(
layers_.push_back(layer);
}

void OnnxConverter::AddLayerAVERAGE_POOL_2DImpl(
void OnnxConverter::AddLayerAVERAGE_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
Expand Down Expand Up @@ -192,7 +194,7 @@ void OnnxConverter::AddLayerAVERAGE_POOL_2DImpl(
layers_.push_back(layer);
}

void OnnxConverter::AddLayerMAX_POOL_2DImpl(
void OnnxConverter::AddLayerMAX_POOL_2D(
const std::string &input, int32_t padding_left, int32_t padding_right,
int32_t padding_top, int32_t padding_bottom, int32_t stride_x,
int32_t stride_y, int32_t kernel_width, int32_t kernel_height,
Expand Down Expand Up @@ -378,7 +380,7 @@ void OnnxConverter::AddLayerCONCATENATION(
layers_.push_back(layer);
}

void OnnxConverter::AddLayerDEPTHWISE_CONV_2DImpl(
void OnnxConverter::AddLayerDEPTHWISE_CONV_2D(
const std::string &input, const std::string &weight,
const dnn::optional<std::string> &bias, int32_t padding_left,
int32_t padding_right, int32_t padding_top, int32_t padding_bottom,
Expand Down

0 comments on commit 0895bb4

Please sign in to comment.