diff --git a/src/operator/contrib/adaptive_avg_pooling-inl.h b/src/operator/contrib/adaptive_avg_pooling-inl.h
index 7331c7bd47a1..12284d9d85d2 100644
--- a/src/operator/contrib/adaptive_avg_pooling-inl.h
+++ b/src/operator/contrib/adaptive_avg_pooling-inl.h
@@ -144,41 +144,6 @@ static bool AdaptiveAvgPoolOpInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-static bool AdaptiveAvgPoolOpInferType(const nnvm::NodeAttrs& attrs,
-                                       std::vector<int> *in_type,
-                                       std::vector<int> *out_type) {
-  using namespace mshadow;
-  CHECK_EQ(in_type->size(), 1U);
-  int dtype = (*in_type)[0];
-  CHECK_NE(dtype, -1) << "First input must have specified type";
-  // For float16 input type beta, gamma, mean, and average are stored in float32.
-  // For other input types, these parameters have the same type as input
-  // NOTE: This requirement is from cuDNN (v. 4 and 5)
-  int dtype_param = 0;
-  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
-      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
-  out_type->clear();
-  out_type->push_back(dtype_param);
-  return true;
-}
-
-static inline bool AdaptiveAvgPoolOpStorageType(const nnvm::NodeAttrs &attrs,
-                                                const int dev_mask,
-                                                DispatchMode *dispatch_mode,
-                                                std::vector<int> *in_attrs,
-                                                std::vector<int> *out_attrs) {
-  CHECK_EQ(in_attrs->size(), 1);
-  CHECK_EQ(out_attrs->size(), 1);
-  *dispatch_mode = DispatchMode::kFCompute;
-  for (int& v : *in_attrs) {
-    if (v == - 1) v = kDefaultStorage;
-  }
-  for (size_t i = 0; i < out_attrs->size(); i++) {
-    (*out_attrs)[i] = kDefaultStorage;
-  }
-  return true;
-}
-
 using namespace mshadow;
 template<typename xpu, typename Dtype>
 MSHADOW_XINLINE int get_stride(Tensor<xpu, 4, Dtype> tensor, int idx) {
diff --git a/src/operator/contrib/adaptive_avg_pooling.cc b/src/operator/contrib/adaptive_avg_pooling.cc
index 079571177cbf..00ab36605bf4 100644
--- a/src/operator/contrib/adaptive_avg_pooling.cc
+++ b/src/operator/contrib/adaptive_avg_pooling.cc
@@ -216,8 +216,6 @@ The pooling kernel and stride sizes are automatically chosen for desired output
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::FInferShape>("FInferShape", AdaptiveAvgPoolOpInferShape)
-.set_attr<nnvm::FInferType>("FInferType", AdaptiveAvgPoolOpInferType)
-.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
 .set_attr<FCompute>("FCompute", AdaptiveAvgPoolOpForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
                            ElemwiseGradUseNone{"_backward_contrib_AdaptiveAvgPooling2D"})
@@ -229,7 +227,6 @@ NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D)
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
-.set_attr<FInferStorageType>("FInferStorageType", AdaptiveAvgPoolOpStorageType)
 .set_attr<FCompute>("FCompute", AdaptiveAvgPoolOpBackward<cpu>);
 
 
diff --git a/src/operator/contrib/bilinear_resize-inl.h b/src/operator/contrib/bilinear_resize-inl.h
index c096f0149751..ff3f794d167d 100644
--- a/src/operator/contrib/bilinear_resize-inl.h
+++ b/src/operator/contrib/bilinear_resize-inl.h
@@ -136,42 +136,6 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-static bool BilinearSampleOpInferType(const nnvm::NodeAttrs& attrs,
-                                      std::vector<int> *in_type,
-                                      std::vector<int> *out_type) {
-  using namespace mshadow;
-  CHECK_EQ(in_type->size(), 1U);
-  int dtype = (*in_type)[0];
-  CHECK_NE(dtype, -1) << "First input must have specified type";
-  // For float16 input type beta, gamma, mean, and average are stored in float32.
-  // For other input types, these parameters have the same type as input
-  // NOTE: This requirement is from cuDNN (v. 4 and 5)
-  int dtype_param = 0;
-  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DTypeX, AccRealX, {
-      dtype_param = mshadow::DataType<AccRealX>::kFlag; });
-  out_type->clear();
-  out_type->push_back(dtype_param);
-  return true;
-}
-
-static inline bool BilinearSampleOpStorageType(const nnvm::NodeAttrs &attrs,
-                                               const int dev_mask,
-                                               DispatchMode *dispatch_mode,
-                                               std::vector<int> *in_attrs,
-                                               std::vector<int> *out_attrs) {
-  CHECK_EQ(in_attrs->size(), 1);
-  CHECK_EQ(out_attrs->size(), 1);
-  *dispatch_mode = DispatchMode::kFCompute;
-  for (int& v : *in_attrs) {
-    if (v == - 1) v = kDefaultStorage;
-  }
-  for (size_t i = 0; i < out_attrs->size(); i++) {
-    (*out_attrs)[i] = kDefaultStorage;
-  }
-  return true;
-}
-
-
 
 }  // namespace op
 }  // namespace mxnet
diff --git a/src/operator/contrib/bilinear_resize.cc b/src/operator/contrib/bilinear_resize.cc
index e1248ce97bbf..074f74aefcc9 100644
--- a/src/operator/contrib/bilinear_resize.cc
+++ b/src/operator/contrib/bilinear_resize.cc
@@ -177,8 +177,6 @@ for more details.
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::FInferShape>("FInferShape", BilinearSampleOpInferShape)
-.set_attr<nnvm::FInferType>("FInferType", BilinearSampleOpInferType)
-.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
 .set_attr<FCompute>("FCompute", BilinearSampleOpForward<cpu>)
 .set_attr<nnvm::FGradient>("FGradient",
                            ElemwiseGradUseNone{"_backward_contrib_BilinearResize2D"})
@@ -190,7 +188,6 @@ NNVM_REGISTER_OP(_backward_contrib_BilinearResize2D)
 .set_num_inputs(1)
 .set_num_outputs(1)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
-.set_attr<FInferStorageType>("FInferStorageType", BilinearSampleOpStorageType)
 .set_attr<FCompute>("FCompute", BilinearSampleOpBackward<cpu>);