Avoid `using` in header files (#2257)
* Avoid `using` in header files

* Fix clang-format

* Use clang-format-7 to reformat code
ShawnZhong authored May 26, 2020
1 parent 3d65fc6 commit e89c4c0
Showing 6 changed files with 90 additions and 114 deletions.
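
Background on the change: a `using` directive or declaration at namespace scope in a header is inherited by every translation unit that includes the header, so unrelated code can hit name collisions far from the header itself. A minimal sketch of the failure mode (illustrative only; `leaky.h` and the names in it are made up, not code from this repository):

// leaky.h -- hypothetical header, not from torchvision
#pragma once
#include <algorithm>
using namespace std; // every includer now sees all of std:: unqualified

// client.cpp
#include "leaky.h"
int count = 0; // declares ::count
int n = count; // error: 'count' is ambiguous (::count vs. std::count from <algorithm>)

Since headers such as DeformConv.h are included across torchvision, the commit removes the `using` lines and writes the `torch::autograd::` and `at::` names in full.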
40 changes: 17 additions & 23 deletions torchvision/csrc/DeformConv.h
@@ -88,21 +88,15 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> DeformConv2d_backward
       offset_groups);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class DeformConv2dFunction
     : public torch::autograd::Function<DeformConv2dFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable weight,
-      Variable offset,
-      Variable bias,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable weight,
+      torch::autograd::Variable offset,
+      torch::autograd::Variable bias,
       int64_t stride_h,
       int64_t stride_w,
       int64_t pad_h,
@@ -137,9 +131,9 @@ class DeformConv2dFunction
     };
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     auto saved = ctx->get_saved_variables();
     auto input = saved[0];
     auto weight = saved[1];
@@ -176,14 +170,14 @@ class DeformConv2dFunction
         grad_weight,
         grad_offset,
         grad_bias,
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
-        Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
+        torch::autograd::Variable(),
     };
   }
 };
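
Where the fully qualified names get long, a header-safe middle ground (a sketch, not something this commit adopts) is a `using` declaration at function scope, which is visible only inside that one body and never reaches includers:

#include <torch/torch.h>

// Sketch: `make_empty_grads` is a made-up example function. The alias below
// is confined to the function body, so a header containing this function
// would not leak any names to its includers.
inline torch::autograd::variable_list make_empty_grads() {
  using torch::autograd::Variable; // function-scope: does not escape
  return {Variable(), Variable(), Variable()};
}

The commit instead spells every name out, which keeps the headers uniform at the cost of some verbosity.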
36 changes: 17 additions & 19 deletions torchvision/csrc/PSROIAlign.h
@@ -79,19 +79,13 @@ at::Tensor PSROIAlign_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class PSROIAlignFunction
     : public torch::autograd::Function<PSROIAlignFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width,
@@ -115,9 +109,9 @@ class PSROIAlignFunction
     return {output, channel_mapping};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -135,19 +129,23 @@ class PSROIAlignFunction
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {
-        grad_in, Variable(), Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-std::tuple<Tensor, Tensor> ps_roi_align(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> ps_roi_align(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width,
     const int64_t sampling_ratio) {
   auto result = PSROIAlignFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
34 changes: 16 additions & 18 deletions torchvision/csrc/PSROIPool.h
@@ -68,18 +68,12 @@ at::Tensor PSROIPool_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width) {
@@ -96,9 +90,9 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
     return {output, channel_mapping};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -115,17 +109,21 @@ class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> {
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {grad_in, Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-std::tuple<Tensor, Tensor> ps_roi_pool(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> ps_roi_pool(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
   auto result = PSROIPoolFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
38 changes: 16 additions & 22 deletions torchvision/csrc/ROIAlign.h
@@ -89,18 +89,12 @@ at::Tensor ROIAlign_backward(
       aligned);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width,
@@ -124,9 +118,9 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
     return {result};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -144,18 +138,18 @@ class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> {
         ctx->saved_data["sampling_ratio"].toInt(),
         ctx->saved_data["aligned"].toBool());
     return {grad_in,
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable(),
-            Variable()};
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
   }
 };
 
-Tensor roi_align(
-    const Tensor& input,
-    const Tensor& rois,
+at::Tensor roi_align(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width,
34 changes: 16 additions & 18 deletions torchvision/csrc/ROIPool.h
@@ -68,18 +68,12 @@ at::Tensor ROIPool_backward(
       width);
 }
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
-      Variable rois,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
+      torch::autograd::Variable rois,
       const double spatial_scale,
       const int64_t pooled_height,
       const int64_t pooled_width) {
@@ -96,9 +90,9 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
     return {output, argmax};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto saved = ctx->get_saved_variables();
     auto rois = saved[0];
@@ -115,17 +109,21 @@ class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> {
         input_shape[1],
         input_shape[2],
         input_shape[3]);
-    return {grad_in, Variable(), Variable(), Variable(), Variable()};
+    return {grad_in,
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable(),
+            torch::autograd::Variable()};
  }
 };
 
-std::tuple<Tensor, Tensor> roi_pool(
-    const Tensor& input,
-    const Tensor& rois,
+std::tuple<at::Tensor, at::Tensor> roi_pool(
+    const at::Tensor& input,
+    const at::Tensor& rois,
     const double spatial_scale,
     const int64_t pooled_height,
     const int64_t pooled_width) {
   auto result = ROIPoolFunction::apply(
       input, rois, spatial_scale, pooled_height, pooled_width);
-  return std::tuple<Tensor, Tensor>(result[0], result[1]);
+  return std::tuple<at::Tensor, at::Tensor>(result[0], result[1]);
 }
22 changes: 8 additions & 14 deletions torchvision/csrc/empty_tensor_op.h
@@ -5,33 +5,27 @@
 // Python bindings for the C++ frontend (includes Python.h).
 #include <torch/python.h>
 
-using namespace at;
-using torch::Tensor;
-using torch::autograd::AutogradContext;
-using torch::autograd::Variable;
-using torch::autograd::variable_list;
-
 class NewEmptyTensorOp : public torch::autograd::Function<NewEmptyTensorOp> {
  public:
-  static variable_list forward(
-      AutogradContext* ctx,
-      Variable input,
+  static torch::autograd::variable_list forward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::Variable input,
       c10::List<int64_t> new_shape) {
     ctx->saved_data["shape"] = input.sizes();
     std::vector<int64_t> shape(new_shape.begin(), new_shape.end());
-    return {input.new_empty(shape, TensorOptions())};
+    return {input.new_empty(shape, at::TensorOptions())};
   }
 
-  static variable_list backward(
-      AutogradContext* ctx,
-      variable_list grad_output) {
+  static torch::autograd::variable_list backward(
+      torch::autograd::AutogradContext* ctx,
+      torch::autograd::variable_list grad_output) {
     // Use data saved in forward
     auto shape = ctx->saved_data["shape"].toIntList();
     auto out = forward(ctx, grad_output[0], shape);
     return {out[0], at::Tensor()};
  }
 };
 
-Tensor new_empty_tensor(const Tensor& input, c10::List<int64_t> shape) {
+at::Tensor new_empty_tensor(const at::Tensor& input, c10::List<int64_t> shape) {
   return NewEmptyTensorOp::apply(input, shape)[0];
 }
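
A related caveat (general C++ practice, not something this diff changes): a namespace alias at namespace scope in a header leaks as well, since every includer receives the alias name. Aliases of that kind are safe in a .cpp file, where they end with the translation unit. A hypothetical sketch (`some_op.cpp` and `zero_grads` are made-up names):

// some_op.cpp -- hypothetical source file, not part of this commit
#include <torch/torch.h>

namespace ta = torch::autograd; // safe here: visible only within this .cpp

ta::variable_list zero_grads() {
  return {ta::Variable(), ta::Variable()};
}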
