This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Fix setting cudnn bias stride and change order of bgrad and wgrad to boost performance #18905

Merged: 1 commit, Aug 18, 2020
27 changes: 14 additions & 13 deletions src/operator/nn/cudnn/cudnn_convolution-inl.h
@@ -212,16 +212,6 @@ class CuDNNConvolutionOp {
     typename DataType<DType>::ScaleType alpha = 1.0f;
     typename DataType<DType>::ScaleType beta = 0.0f;
     typename DataType<DType>::ScaleType beta_add = 1.0f;
-    if (!param_.no_bias && (req[conv::kBias] != kNullOp)) {
-      Tensor<gpu, 1, DType> gbias = in_grad[conv::kBias].get<gpu, 1, DType>(s);
-      CUDNN_CALL(cudnnConvolutionBackwardBias(s->dnn_handle_,
-                                              &alpha,
-                                              out_desc_,
-                                              grad_ptr,
-                                              req[conv::kBias] == kAddTo ? &beta_add : &beta,
-                                              bias_desc_,
-                                              gbias.dptr_));
-    }
     if (req[conv::kWeight] != kNullOp) {
       CHECK_EQ(add_to_weight_, req[conv::kWeight] == kAddTo);
       CUDNN_CALL(cudnnConvolutionBackwardFilter(s->dnn_handle_,
@@ -238,6 +228,16 @@ class CuDNNConvolutionOp {
                                                 filter_desc_,
                                                 gwmat_ptr));
     }
+    if (!param_.no_bias && (req[conv::kBias] != kNullOp)) {
+      Tensor<gpu, 1, DType> gbias = in_grad[conv::kBias].get<gpu, 1, DType>(s);
+      CUDNN_CALL(cudnnConvolutionBackwardBias(s->dnn_handle_,
+                                              &alpha,
+                                              out_desc_,
+                                              grad_ptr,
+                                              req[conv::kBias] == kAddTo ? &beta_add : &beta,
+                                              bias_desc_,
+                                              gbias.dptr_));
+    }
     if (req[conv::kData] != kNullOp) {
       CUDNN_CALL(cudnnConvolutionBackwardData(s_dgrad.GetStream()->dnn_handle_,
                                               &alpha,
@@ -459,13 +459,14 @@

     if (!param_.no_bias) {
       mxnet::TShape bias = in_shape[conv::kBias];
+      int bias_dim = static_cast<int>(bias[0]);
       std::vector<int> bias_shape = {1,
-                                     static_cast<int>(bias[0]),
+                                     bias_dim,
                                      1, 1};
-      std::vector<int> bias_stride = {static_cast<int>(bias[0]), 1, 1, 1};
+      std::vector<int> bias_stride = {bias_dim, 1, bias_dim, bias_dim};
       if (param_.kernel.ndim() == 3) {
         bias_shape.push_back(1);
-        bias_stride.push_back(1);
+        bias_stride.push_back(bias_dim);
       }
       CUDNN_CALL(cudnnSetTensorNdDescriptor(bias_desc_,
                                             dtype_,
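
Why strides of {C, 1, C, C} instead of the packed {C, 1, 1, 1}: for a bias of shape {1, C, 1, 1} both stride vectors address the same C contiguous values, since every non-channel extent is 1, but cuDNN infers a tensor's memory layout from its strides. {C, 1, 1, 1} implies NCHW, while {C, 1, C, C} (strideN = H*W*C, strideH = W*C, strideW = C) implies NHWC, presumably so the bias descriptor agrees with a channels-last out_desc_ and cuDNN can use its NHWC bias-reduction path. A minimal sketch of the computation the patch performs; the helper name and standalone function are illustrative, not part of the patch:

#include <cudnn.h>

#include <vector>

// Illustrative helper (not from the patch): build the bias descriptor with
// channels-last strides, mirroring what the patched code computes. For shape
// {1, C, 1, 1(, 1)} the strides do not change which bytes are addressed; they
// only change the layout cuDNN infers for the tensor.
cudnnStatus_t SetBiasDescriptor(cudnnTensorDescriptor_t bias_desc,
                                cudnnDataType_t dtype,
                                int bias_dim,       // C, the channel count
                                bool is_3d_conv) {
  std::vector<int> bias_shape = {1, bias_dim, 1, 1};
  // NHWC strides for {N=1, C, H=1, W=1}:
  //   strideN = H*W*C = C, strideC = 1, strideH = W*C = C, strideW = C.
  std::vector<int> bias_stride = {bias_dim, 1, bias_dim, bias_dim};
  if (is_3d_conv) {  // NDHWC: one more unit spatial dim, stride again C
    bias_shape.push_back(1);
    bias_stride.push_back(bias_dim);
  }
  return cudnnSetTensorNdDescriptor(bias_desc, dtype,
                                    static_cast<int>(bias_shape.size()),
                                    bias_shape.data(), bias_stride.data());
}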
27 changes: 14 additions & 13 deletions src/operator/nn/cudnn/cudnn_deconvolution-inl.h
@@ -201,16 +201,6 @@ class CuDNNDeconvolutionOp {
       req[deconv::kData] == kAddTo ? 1.0f : 0.0f;
     typename DataType<DType>::ScaleType weight_beta =
       req[deconv::kWeight] == kAddTo ? 1.0f : 0.0f;
-    if (!param_.no_bias && (req[deconv::kBias] != kNullOp)) {
-      Tensor<gpu, 1, DType> gbias = in_grad[deconv::kBias].get<gpu, 1, DType>(s);
-      CUDNN_CALL(cudnnConvolutionBackwardBias(s->dnn_handle_,
-                                              &alpha,
-                                              out_desc_,
-                                              grad_ptr + out_offset_ * g,
-                                              &bias_beta,
-                                              bias_desc_,
-                                              gbias.dptr_ + bias_offset_ * g));
-    }
     if (req[deconv::kWeight] != kNullOp) {
       CHECK_EQ(add_to_weight_, req[deconv::kWeight] == kAddTo);
       CUDNN_CALL(cudnnConvolutionBackwardFilter(
@@ -228,6 +218,16 @@ class CuDNNDeconvolutionOp {
           filter_desc_,
           gwmat_ptr + weight_offset_ * g));
     }
+    if (!param_.no_bias && (req[deconv::kBias] != kNullOp)) {
+      Tensor<gpu, 1, DType> gbias = in_grad[deconv::kBias].get<gpu, 1, DType>(s);
+      CUDNN_CALL(cudnnConvolutionBackwardBias(s->dnn_handle_,
+                                              &alpha,
+                                              out_desc_,
+                                              grad_ptr + out_offset_ * g,
+                                              &bias_beta,
+                                              bias_desc_,
+                                              gbias.dptr_ + bias_offset_ * g));
+    }
     if (req[deconv::kData] != kNullOp) {
       CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
                                          &alpha,
@@ -460,13 +460,14 @@
     if (!param_.no_bias) {
       mxnet::TShape bias = in_shape[deconv::kBias];
       bias_offset_ = bias[0] / param_.num_group;
+      int bias_dim = static_cast<int>(bias_offset_);
       std::vector<int> bias_shape = {1,
-                                     static_cast<int>(bias[0] / param_.num_group),
+                                     bias_dim,
                                      1, 1};
-      std::vector<int> bias_stride = {static_cast<int>(bias_offset_), 1, 1, 1};
+      std::vector<int> bias_stride = {bias_dim, 1, bias_dim, bias_dim};
       if (param_.kernel.ndim() == 3) {
         bias_shape.push_back(1);
-        bias_stride.push_back(1);
+        bias_stride.push_back(bias_dim);
       }
       CUDNN_CALL(cudnnSetTensorNdDescriptor(bias_desc_,
                                             dtype_,
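
The other half of the change, in both files, is purely a reordering: the bias-gradient call moves from before the filter-gradient call to after it, so the backward pass now issues wgrad, then bgrad, then dgrad. The PR title attributes a performance gain to this order; a plausible reading is that launching the heavyweight wgrad kernel before the cheap bias reduction keeps the GPU busier, but the diff itself does not state the rationale. A condensed, self-contained paraphrase of the new call order (the function and its parameter list are illustrative; the real code lives inside the Backward methods patched above):

#include <cudnn.h>

#include <cstdio>
#include <cstdlib>

// Minimal stand-in for MXNet's CUDNN_CALL status-checking macro.
#define CUDNN_CALL(expr)                                    \
  do {                                                      \
    cudnnStatus_t status = (expr);                          \
    if (status != CUDNN_STATUS_SUCCESS) {                   \
      std::fprintf(stderr, "cuDNN error: %s\n",             \
                   cudnnGetErrorString(status));            \
      std::abort();                                         \
    }                                                       \
  } while (0)

// Illustrative paraphrase of the patched backward pass: wgrad first,
// bgrad second, dgrad last (dgrad on its own handle/stream, as in MXNet).
void BackwardInPatchedOrder(
    cudnnHandle_t handle, cudnnHandle_t dgrad_handle,
    const void* alpha, const void* beta,
    cudnnTensorDescriptor_t in_desc, const void* x, void* dx,
    cudnnTensorDescriptor_t out_desc, const void* dy,
    cudnnFilterDescriptor_t filter_desc, const void* w, void* dw,
    cudnnTensorDescriptor_t bias_desc, void* db,
    cudnnConvolutionDescriptor_t conv_desc,
    cudnnConvolutionBwdFilterAlgo_t wgrad_algo,
    void* wgrad_ws, size_t wgrad_ws_size,
    cudnnConvolutionBwdDataAlgo_t dgrad_algo,
    void* dgrad_ws, size_t dgrad_ws_size) {
  // 1. Filter gradient: the expensive kernel is issued first.
  CUDNN_CALL(cudnnConvolutionBackwardFilter(handle, alpha, in_desc, x,
                                            out_desc, dy, conv_desc,
                                            wgrad_algo, wgrad_ws,
                                            wgrad_ws_size, beta,
                                            filter_desc, dw));
  // 2. Bias gradient: a cheap reduction of dy over N/H/W, now after wgrad.
  CUDNN_CALL(cudnnConvolutionBackwardBias(handle, alpha, out_desc, dy,
                                          beta, bias_desc, db));
  // 3. Data gradient, unchanged in position, on the dgrad stream's handle.
  CUDNN_CALL(cudnnConvolutionBackwardData(dgrad_handle, alpha, filter_desc, w,
                                          out_desc, dy, conv_desc,
                                          dgrad_algo, dgrad_ws,
                                          dgrad_ws_size, beta, in_desc, dx));
}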