[Change] rename func without index
megemini committed Jan 11, 2024
1 parent 6cc617d commit e24739a
Showing 18 changed files with 264 additions and 306 deletions.
12 changes: 0 additions & 12 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -1161,18 +1161,6 @@
   outputs :
     out : Y
 
-- op : fractional_max_pool2d
-  inputs :
-    {x : X}
-  outputs :
-    {out : Out, mask : Mask}
-
-- op : fractional_max_pool3d
-  inputs :
-    {x : X}
-  outputs :
-    {out : Out, mask : Mask}
-
 - op : frame
   backward : frame_grad
   inputs :
4 changes: 2 additions & 2 deletions paddle/phi/api/yaml/ops.yaml
@@ -1025,7 +1025,7 @@
   args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true)
   output : Tensor(out), Tensor(mask)
   infer_meta :
-    func : FractionalMaxPoolWithIndexInferMeta
+    func : FractionalMaxPoolInferMeta
   kernel :
     func : fractional_max_pool2d
   backward : fractional_max_pool2d_grad
@@ -1034,7 +1034,7 @@
   args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0, 0}, float random_u = 0.0, bool return_mask = true)
   output : Tensor(out), Tensor(mask)
   infer_meta :
-    func : FractionalMaxPoolWithIndexInferMeta
+    func : FractionalMaxPoolInferMeta
   kernel :
     func : fractional_max_pool3d
   backward : fractional_max_pool3d_grad
76 changes: 38 additions & 38 deletions paddle/phi/infermeta/unary.cc
@@ -1719,6 +1719,44 @@ void FoldInferMeta(const MetaTensor& x,
   }
 }
 
+void FractionalMaxPoolInferMeta(const MetaTensor& x,
+                                const std::vector<int>& output_size,
+                                const std::vector<int>& kernel_size,
+                                float random_u,
+                                bool return_mask,
+                                MetaTensor* out,
+                                MetaTensor* mask,
+                                MetaConfig config) {
+  std::vector<int> output_size_ = output_size;
+
+  auto x_dims = x.dims();
+
+  PADDLE_ENFORCE_EQ(
+      (x_dims.size() == 4 || x_dims.size() == 5),
+      true,
+      errors::InvalidArgument("Pooling input should be 4-D or "
+                              "5-D tensor but received %dD-Tensor",
+                              x_dims.size()));
+
+  PADDLE_ENFORCE_EQ(
+      x_dims.size() - output_size_.size(),
+      2U,
+      errors::InvalidArgument(
+          "The input size %d minus the output size %d should equal 2.",
+          x_dims.size(),
+          output_size_.size()));
+
+  std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
+  output_shape.insert(
+      output_shape.end(), output_size_.begin(), output_size_.end());
+
+  out->set_dims(common::make_ddim(output_shape));
+  out->set_dtype(x.dtype());
+
+  mask->set_dims(common::make_ddim(output_shape));
+  mask->set_dtype(phi::CppTypeToDataType<int>::Type());
+}
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
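For intuition about the shape rule in the added FractionalMaxPoolInferMeta: the first two input dimensions (batch N and channels C) are kept, and the requested spatial sizes are appended, for both out and mask. A minimal standalone sketch of that rule, with illustrative names outside the phi framework:

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative restatement of the shape rule (not the phi API):
// out/mask dims = {N, C} + output_size (spatial sizes only).
std::vector<int64_t> InferFractionalPoolShape(
    const std::vector<int64_t>& x_dims,     // 4-D NCHW or 5-D NCDHW
    const std::vector<int>& output_size) {  // rank(x) - 2 entries
  std::vector<int64_t> out{x_dims[0], x_dims[1]};
  out.insert(out.end(), output_size.begin(), output_size.end());
  return out;
}

int main() {
  // A 4-D NCHW input {2, 3, 32, 32} pooled to output_size {16, 16}
  // yields out and mask of shape {2, 3, 16, 16}.
  for (int64_t d : InferFractionalPoolShape({2, 3, 32, 32}, {16, 16})) {
    std::cout << d << ' ';  // prints: 2 3 16 16
  }
  std::cout << '\n';
}

The same rule covers the 5-D NCDHW case, e.g. {2, 3, 8, 32, 32} with output_size {4, 16, 16} gives {2, 3, 4, 16, 16}, which is why the rank check above demands rank(x) - len(output_size) == 2.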
@@ -2349,44 +2387,6 @@ void MaxPoolWithIndexInferMeta(const MetaTensor& x,
   mask->set_dtype(phi::CppTypeToDataType<int>::Type());
 }
 
-void FractionalMaxPoolWithIndexInferMeta(const MetaTensor& x,
-                                         const std::vector<int>& output_size,
-                                         const std::vector<int>& kernel_size,
-                                         float random_u,
-                                         bool return_mask,
-                                         MetaTensor* out,
-                                         MetaTensor* mask,
-                                         MetaConfig config) {
-  std::vector<int> output_size_ = output_size;
-
-  auto x_dims = x.dims();
-
-  PADDLE_ENFORCE_EQ(
-      (x_dims.size() == 4 || x_dims.size() == 5),
-      true,
-      errors::InvalidArgument("Pooling intput should be 4-D or "
-                              "5-D tensor but received %dD-Tensor",
-                              x_dims.size()));
-
-  PADDLE_ENFORCE_EQ(
-      x_dims.size() - output_size_.size(),
-      2U,
-      errors::InvalidArgument(
-          "The input size %d minus the output size %d should equal to 2.",
-          x_dims.size(),
-          output_size_.size()));
-
-  std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
-  output_shape.insert(
-      output_shape.end(), output_size_.begin(), output_size_.end());
-
-  out->set_dims(common::make_ddim(output_shape));
-  out->set_dtype(x.dtype());
-
-  mask->set_dims(common::make_ddim(output_shape));
-  mask->set_dtype(phi::CppTypeToDataType<int>::Type());
-}
-
 void MaxPoolV2InferMeta(const MetaTensor& x,
                         const std::vector<int>& kernel_size,
                         const std::vector<int>& strides,
18 changes: 9 additions & 9 deletions paddle/phi/infermeta/unary.h
@@ -269,6 +269,15 @@ void FoldInferMeta(const MetaTensor& x,
                    const std::vector<int>& dilations,
                    MetaTensor* out);
 
+void FractionalMaxPoolInferMeta(const MetaTensor& x,
+                                const std::vector<int>& output_size,
+                                const std::vector<int>& kernel_size,
+                                float random_u,
+                                bool return_mask,
+                                MetaTensor* out,
+                                MetaTensor* mask,
+                                MetaConfig config = MetaConfig());
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
@@ -350,15 +359,6 @@ void MaxPoolWithIndexInferMeta(const MetaTensor& x,
                               MetaTensor* mask,
                               MetaConfig config = MetaConfig());
 
-void FractionalMaxPoolWithIndexInferMeta(const MetaTensor& x,
-                                         const std::vector<int>& output_size,
-                                         const std::vector<int>& kernel_size,
-                                         float random_u,
-                                         bool return_mask,
-                                         MetaTensor* out,
-                                         MetaTensor* mask,
-                                         MetaConfig config = MetaConfig());
-
 void MaxPoolV2InferMeta(const MetaTensor& x,
                         const std::vector<int>& kernel_size,
                         const std::vector<int>& strides,
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/pool_grad_kernel.cc
@@ -48,7 +48,7 @@ PD_REGISTER_KERNEL(max_pool3d_with_index_grad,
 PD_REGISTER_KERNEL(fractional_max_pool2d_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool2dWithIndexGradKernel,
+                   phi::FractionalMaxPool2dGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {
@@ -58,7 +58,7 @@ PD_REGISTER_KERNEL(fractional_max_pool2d_grad,
 PD_REGISTER_KERNEL(fractional_max_pool3d_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool3dWithIndexGradKernel,
+                   phi::FractionalMaxPool3dGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/pool_kernel.cc
@@ -40,7 +40,7 @@ PD_REGISTER_KERNEL(max_pool3d_with_index,
 PD_REGISTER_KERNEL(fractional_max_pool2d,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool2dWithIndexKernel,
+                   phi::FractionalMaxPool2dKernel,
                    float,
                    double,
                    phi::dtype::float16) {
@@ -50,7 +50,7 @@ PD_REGISTER_KERNEL(fractional_max_pool2d,
 PD_REGISTER_KERNEL(fractional_max_pool3d,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool3dWithIndexKernel,
+                   phi::FractionalMaxPool3dKernel,
                    float,
                    double,
                    phi::dtype::float16) {
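The PD_REGISTER_KERNEL invocations above bind each op name to its renamed kernel entry point for every listed dtype. As a rough sketch of the static-registration pattern such macros build on (all names below are hypothetical; Paddle's real registry also keys on backend, layout, and dtype):

#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Hypothetical name -> kernel map standing in for Paddle's real registry.
using Kernel = std::function<void()>;

static std::map<std::string, Kernel>& Registry() {
  static std::map<std::string, Kernel> r;
  return r;
}

// A static object whose constructor runs before main(), registering a
// kernel under its op name -- the effect a registration macro expands to.
struct Registrar {
  Registrar(const std::string& name, Kernel k) {
    Registry()[name] = std::move(k);
  }
};

static Registrar reg("fractional_max_pool2d", [] {
  std::cout << "dispatching fractional_max_pool2d\n";
});

int main() { Registry().at("fractional_max_pool2d")(); }

Registration of this shape is why a pure rename only touches the phi:: symbol passed to the macro: the string op name, and therefore every call site that dispatches by name, stays stable.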
40 changes: 16 additions & 24 deletions paddle/phi/kernels/funcs/pooling.cc
@@ -1850,7 +1850,7 @@ template class MaxPool3dWithIndexGradFunctor<CPUContext, double, int>;
  * All tensors are in NCHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool2dWithIndexFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool2dFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& input,
@@ -1955,7 +1955,7 @@ class FractionalMaxPool2dWithIndexFunctor<CPUContext, T1, T2> {
  * All tensors are in NCHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool2dGradFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
@@ -1996,22 +1996,18 @@ class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
   }
 };
 
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext, float, int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, float, int>;
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext, double, int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, double, int>;
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext,
-                                                   dtype::float16,
-                                                   int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext,
-                                                       dtype::float16,
-                                                       int>;
+template class FractionalMaxPool2dFunctor<CPUContext, float, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, float, int>;
+template class FractionalMaxPool2dFunctor<CPUContext, double, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, double, int>;
+template class FractionalMaxPool2dFunctor<CPUContext, dtype::float16, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, dtype::float16, int>;
 
 /*
  * All tensors are in NCDHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool3dWithIndexFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool3dFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& input,
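The renamed functor templates are still explicitly instantiated for each supported (value type, mask index type) pair, so their definitions in this .cc file link from other translation units. A minimal sketch of that C++ pattern, with hypothetical names:

#include <cstdio>

// Hypothetical stand-in for a pooling functor defined in a .cc file.
template <typename T1, typename T2>
class PoolFunctor {
 public:
  void operator()(T1 value) const {
    // T2 plays the role of the mask index type.
    std::printf("%d\n", static_cast<T2>(value));
  }
};

// Explicit instantiation: emits code for these type pairs in this
// translation unit, mirroring the template class lines above.
template class PoolFunctor<float, int>;
template class PoolFunctor<double, int>;

int main() { PoolFunctor<float, int>{}(3.5f); }  // prints: 3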
@@ -2143,7 +2139,7 @@ class FractionalMaxPool3dWithIndexFunctor<CPUContext, T1, T2> {
  * All tensors are in NCDHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool3dGradFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
@@ -2189,16 +2185,12 @@ class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
   }
 };
 
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext, float, int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, float, int>;
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext, double, int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, double, int>;
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext,
-                                                   dtype::float16,
-                                                   int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext,
-                                                       dtype::float16,
-                                                       int>;
+template class FractionalMaxPool3dFunctor<CPUContext, float, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, float, int>;
+template class FractionalMaxPool3dFunctor<CPUContext, double, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, double, int>;
+template class FractionalMaxPool3dFunctor<CPUContext, dtype::float16, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, dtype::float16, int>;
 
 }  // namespace funcs
 }  // namespace phi
