Skip to content

Commit

Permalink
Fix NNAPI "Bad data" error for CONV_2D on API level < 29
Browse files Browse the repository at this point in the history
  • Loading branch information
daquexian committed Jan 27, 2020
1 parent f64b511 commit 2980add
Show file tree
Hide file tree
Showing 5 changed files with 71 additions and 30 deletions.
6 changes: 6 additions & 0 deletions dnnlibrary/ModelBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ OperandType ModelBuilder::GetOperandType(const QuantInfo &quant_info,
return map_type##_operand_map_[value]; \
}

DEFINE_OPERAND_FROM_SCALAR(bool, bool, BOOL);
DEFINE_OPERAND_FROM_SCALAR(uint32_t, uint32, UINT32);
DEFINE_OPERAND_FROM_SCALAR(int32_t, int32, INT32);
DEFINE_OPERAND_FROM_SCALAR(float, float32, FLOAT32);
Expand Down Expand Up @@ -381,4 +382,9 @@ dnn::optional<std::vector<Device>> ModelBuilder::GetDevices() {
return dnn::nullopt;
}
}


// Reports the Android SDK/API level of the loaded NNAPI implementation
// (e.g. 29 on Android 10); used to gate operands that only exist on
// newer NNAPI versions.
int32_t ModelBuilder::android_api_level() const {
    const auto sdk_version = nnapi_->android_sdk_version;
    return sdk_version;
}
} // namespace dnn
79 changes: 52 additions & 27 deletions dnnlibrary/ModelBuilderImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,25 @@ expected<Unit, std::string> ModelBuilder::AddLayer_CONV_2D_Impl(
bias_idx_val = operand_indexes_.at(bias.value());
}
input_indexes.push_back(bias_idx_val);
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, fuse_code, dilation_x,
dilation_y);
AddScalarOperands(input_indexes, padding_left);
AddScalarOperands(input_indexes, padding_right);
AddScalarOperands(input_indexes, padding_top);
AddScalarOperands(input_indexes, padding_bottom);
AddScalarOperands(input_indexes, stride_x);
AddScalarOperands(input_indexes, stride_y);
AddScalarOperands(input_indexes, fuse_code);

if (android_api_level() > 29) {
AddScalarOperands(input_indexes, nchw);
}

if (android_api_level() > 29) {
AddScalarOperands(input_indexes, dilation_x);
}

if (android_api_level() > 29) {
AddScalarOperands(input_indexes, dilation_y);
}
shaper_.Conv(input, weight, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, nchw, dilation_x,
dilation_y, output);
Expand All @@ -95,9 +111,15 @@ expected<Unit, std::string> ModelBuilder::AddLayer_AVERAGE_POOL_2D(
imm_blob_inputs_.insert(input);
const auto input_idx = operand_indexes_.at(input);
input_indexes.push_back(input_idx);
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse_code);
AddScalarOperands(input_indexes, padding_left);
AddScalarOperands(input_indexes, padding_right);
AddScalarOperands(input_indexes, padding_top);
AddScalarOperands(input_indexes, padding_bottom);
AddScalarOperands(input_indexes, stride_x);
AddScalarOperands(input_indexes, stride_y);
AddScalarOperands(input_indexes, kernel_width);
AddScalarOperands(input_indexes, kernel_height);
AddScalarOperands(input_indexes, fuse_code);
shaper_.Pool(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
Expand All @@ -123,9 +145,15 @@ expected<Unit, std::string> ModelBuilder::AddLayer_MAX_POOL_2D(
imm_blob_inputs_.insert(input);
const auto input_idx = operand_indexes_.at(input);
input_indexes.push_back(input_idx);
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, fuse_code);
AddScalarOperands(input_indexes, padding_left);
AddScalarOperands(input_indexes, padding_right);
AddScalarOperands(input_indexes, padding_top);
AddScalarOperands(input_indexes, padding_bottom);
AddScalarOperands(input_indexes, stride_x);
AddScalarOperands(input_indexes, stride_y);
AddScalarOperands(input_indexes, kernel_width);
AddScalarOperands(input_indexes, kernel_height);
AddScalarOperands(input_indexes, fuse_code);
shaper_.Pool(input, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, kernel_width,
kernel_height, output);
Expand Down Expand Up @@ -315,9 +343,14 @@ expected<Unit, std::string> ModelBuilder::AddLayer_DEPTHWISE_CONV_2D(
bias_idx_val = operand_indexes_.at(bias.value());
}
input_indexes.push_back(bias_idx_val);
AddScalarOperands(input_indexes, padding_left, padding_right, padding_top,
padding_bottom, stride_x, stride_y, depth_multiplier,
fuse_code);
AddScalarOperands(input_indexes, padding_left);
AddScalarOperands(input_indexes, padding_right);
AddScalarOperands(input_indexes, padding_top);
AddScalarOperands(input_indexes, padding_bottom);
AddScalarOperands(input_indexes, stride_x);
AddScalarOperands(input_indexes, stride_y);
AddScalarOperands(input_indexes, depth_multiplier);
AddScalarOperands(input_indexes, fuse_code);
shaper_.DepthwiseConv(input, weight, padding_left, padding_right,
padding_top, padding_bottom, stride_x, stride_y,
output);
Expand Down Expand Up @@ -406,7 +439,9 @@ expected<Unit, std::string> ModelBuilder::AddLayer_STRIDED_SLICE(
"input_strides_of_" + output, &strides[0],
{Type::TENSOR_INT32, Shape{static_cast<uint32_t>(strides.size())}});
input_indexes.push_back(strides_idx);
AddScalarOperands(input_indexes, begin_mask, end_mask, shrink_axis_mask);
AddScalarOperands(input_indexes, begin_mask);
AddScalarOperands(input_indexes, end_mask);
AddScalarOperands(input_indexes, shrink_axis_mask);
shaper_.StridedSlice(input, starts, ends, strides, begin_mask, end_mask,
shrink_axis_mask, output);
const OperandType operand_type =
Expand Down Expand Up @@ -472,7 +507,10 @@ expected<Unit, std::string> ModelBuilder::AddLayer_LOCAL_RESPONSE_NORMALIZATION(
imm_blob_inputs_.insert(input);
const auto input_idx = operand_indexes_.at(input);
input_indexes.push_back(input_idx);
AddScalarOperands(input_indexes, radius, bias, alpha, beta);
AddScalarOperands(input_indexes, radius);
AddScalarOperands(input_indexes, bias);
AddScalarOperands(input_indexes, alpha);
AddScalarOperands(input_indexes, beta);
shaper_.Identity(input, output);
const OperandType operand_type =
GetOperandType(operand_types_.at(input).type, shaper_[output]);
Expand Down Expand Up @@ -880,16 +918,3 @@ ModelBuilder::IndexSeq ModelBuilder::AddOperation(
}

} // namespace dnn













10 changes: 8 additions & 2 deletions generate_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,8 +400,14 @@ def generate_model_builder():
for x in tensor_input:
cogoutl(add_tensor_operand(x))
# cogoutl('IndexSeq input_indexes{{{}}};'.format(', '.join([x['name'] + "_idx" for x in tensor_input])))
if len(scalar_input) > 0:
cogoutl('AddScalarOperands(input_indexes, {});'.format(', '.join([x['name'] for x in scalar_input])))
for x in scalar_input:
if 'api' in x:
cogoutl(f"""
if (android_api_level() > {x['api']}) {{
AddScalarOperands(input_indexes, {x['name']});
}}""")
else:
cogoutl(f"AddScalarOperands(input_indexes, {x['name']});")
cogoutl('shaper_.{}({});'.format(op['shaper'],
', '.join([x['name'] for x in ipt_opt if x['needed_by_shaper']])))
if op['output_tensor_type'] != 'auto':
Expand Down
4 changes: 4 additions & 0 deletions include/dnnlibrary/ModelBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ class ModelBuilder {
Shaper shaper_;
IndexSeq input_index_vec_;
IndexSeq output_index_vec_;
std::map<bool, Index> bool_operand_map_;
std::map<uint32_t, Index> uint32_operand_map_;
std::map<int32_t, Index> int32_operand_map_;
std::map<float, Index> float32_operand_map_;
Expand All @@ -64,6 +65,7 @@ class ModelBuilder {
IndexSeq AddOperation(int op, IndexSeq input_indexes,
OperandTypes... output_types);

Index OperandFromScalar(bool value);
Index OperandFromScalar(int32_t value);
Index OperandFromScalar(float value);
Index OperandFromScalar(uint32_t value);
Expand All @@ -88,6 +90,8 @@ class ModelBuilder {
android::nn::wrapper::OperandType GetOperandType(
const QuantInfo &quant_info, const Shape &dims);

int32_t android_api_level() const;

const NnApi *nnapi_ = nullptr;

public:
Expand Down
2 changes: 1 addition & 1 deletion ops.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@
-
predefined: fuse_code
- name: nchw
nnapi_type: bool
nnapi_type: scalar
cpp_type: bool
needed_by_shaper: true
api: 29
Expand Down

0 comments on commit 2980add

Please sign in to comment.