Commit e24739a
[Change] rename func without index
1 parent 6cc617d commit e24739a

18 files changed: +264 additions, -306 deletions

paddle/phi/api/yaml/op_compat.yaml

Lines changed: 0 additions & 12 deletions
@@ -1161,18 +1161,6 @@
   outputs :
     out : Y
 
-- op : fractional_max_pool2d
-  inputs :
-    {x : X}
-  outputs :
-    {out : Out, mask : Mask}
-
-- op : fractional_max_pool3d
-  inputs :
-    {x : X}
-  outputs :
-    {out : Out, mask : Mask}
-
 - op : frame
   backward : frame_grad
   inputs :

paddle/phi/api/yaml/ops.yaml

Lines changed: 2 additions & 2 deletions
@@ -1025,7 +1025,7 @@
   args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true)
   output : Tensor(out), Tensor(mask)
   infer_meta :
-    func : FractionalMaxPoolWithIndexInferMeta
+    func : FractionalMaxPoolInferMeta
   kernel :
     func : fractional_max_pool2d
   backward : fractional_max_pool2d_grad
@@ -1034,7 +1034,7 @@
   args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0, 0}, float random_u = 0.0, bool return_mask = true)
   output : Tensor(out), Tensor(mask)
   infer_meta :
-    func : FractionalMaxPoolWithIndexInferMeta
+    func : FractionalMaxPoolInferMeta
   kernel :
     func : fractional_max_pool3d
   backward : fractional_max_pool3d_grad

paddle/phi/infermeta/unary.cc

Lines changed: 38 additions & 38 deletions
@@ -1719,6 +1719,44 @@ void FoldInferMeta(const MetaTensor& x,
   }
 }
 
+void FractionalMaxPoolInferMeta(const MetaTensor& x,
+                                const std::vector<int>& output_size,
+                                const std::vector<int>& kernel_size,
+                                float random_u,
+                                bool return_mask,
+                                MetaTensor* out,
+                                MetaTensor* mask,
+                                MetaConfig config) {
+  std::vector<int> output_size_ = output_size;
+
+  auto x_dims = x.dims();
+
+  PADDLE_ENFORCE_EQ(
+      (x_dims.size() == 4 || x_dims.size() == 5),
+      true,
+      errors::InvalidArgument("Pooling intput should be 4-D or "
+                              "5-D tensor but received %dD-Tensor",
+                              x_dims.size()));
+
+  PADDLE_ENFORCE_EQ(
+      x_dims.size() - output_size_.size(),
+      2U,
+      errors::InvalidArgument(
+          "The input size %d minus the output size %d should equal to 2.",
+          x_dims.size(),
+          output_size_.size()));
+
+  std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
+  output_shape.insert(
+      output_shape.end(), output_size_.begin(), output_size_.end());
+
+  out->set_dims(common::make_ddim(output_shape));
+  out->set_dtype(x.dtype());
+
+  mask->set_dims(common::make_ddim(output_shape));
+  mask->set_dtype(phi::CppTypeToDataType<int>::Type());
+}
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
@@ -2349,44 +2387,6 @@ void MaxPoolWithIndexInferMeta(const MetaTensor& x,
   mask->set_dtype(phi::CppTypeToDataType<int>::Type());
 }
 
-void FractionalMaxPoolWithIndexInferMeta(const MetaTensor& x,
-                                         const std::vector<int>& output_size,
-                                         const std::vector<int>& kernel_size,
-                                         float random_u,
-                                         bool return_mask,
-                                         MetaTensor* out,
-                                         MetaTensor* mask,
-                                         MetaConfig config) {
-  std::vector<int> output_size_ = output_size;
-
-  auto x_dims = x.dims();
-
-  PADDLE_ENFORCE_EQ(
-      (x_dims.size() == 4 || x_dims.size() == 5),
-      true,
-      errors::InvalidArgument("Pooling intput should be 4-D or "
-                              "5-D tensor but received %dD-Tensor",
-                              x_dims.size()));
-
-  PADDLE_ENFORCE_EQ(
-      x_dims.size() - output_size_.size(),
-      2U,
-      errors::InvalidArgument(
-          "The input size %d minus the output size %d should equal to 2.",
-          x_dims.size(),
-          output_size_.size()));
-
-  std::vector<int64_t> output_shape({x_dims[0], x_dims[1]});
-  output_shape.insert(
-      output_shape.end(), output_size_.begin(), output_size_.end());
-
-  out->set_dims(common::make_ddim(output_shape));
-  out->set_dtype(x.dtype());
-
-  mask->set_dims(common::make_ddim(output_shape));
-  mask->set_dtype(phi::CppTypeToDataType<int>::Type());
-}
-
 void MaxPoolV2InferMeta(const MetaTensor& x,
                         const std::vector<int>& kernel_size,
                         const std::vector<int>& strides,
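
The shape rule that the renamed FractionalMaxPoolInferMeta enforces is unchanged: a 4-D (NCHW) or 5-D (NCDHW) input keeps its batch and channel dims and replaces the spatial dims with output_size, and out and mask both take that shape, with mask typed int. The standalone C++ sketch below mirrors that logic outside of phi for quick experimentation; plain asserts stand in for PADDLE_ENFORCE_EQ, and FractionalMaxPoolOutShape is a hypothetical helper name used only for this illustration.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the dims logic of FractionalMaxPoolInferMeta: keep N and C,
// then append the requested spatial output sizes.
std::vector<int64_t> FractionalMaxPoolOutShape(
    const std::vector<int64_t>& x_dims,     // NCHW or NCDHW input dims
    const std::vector<int>& output_size) {  // target spatial sizes
  assert(x_dims.size() == 4 || x_dims.size() == 5);  // 4-D or 5-D input only
  assert(x_dims.size() - output_size.size() == 2U);  // rank(x) - rank(output_size) == 2

  std::vector<int64_t> output_shape{x_dims[0], x_dims[1]};  // keep N and C
  output_shape.insert(
      output_shape.end(), output_size.begin(), output_size.end());
  return output_shape;  // out and mask share this shape; mask dtype is int
}

int main() {
  // A [2, 3, 32, 32] NCHW input pooled to spatial size 7x7 -> [2, 3, 7, 7].
  for (int64_t d : FractionalMaxPoolOutShape({2, 3, 32, 32}, {7, 7})) {
    std::cout << d << ' ';
  }
  std::cout << '\n';
  return 0;
}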

paddle/phi/infermeta/unary.h

Lines changed: 9 additions & 9 deletions
@@ -269,6 +269,15 @@ void FoldInferMeta(const MetaTensor& x,
                    const std::vector<int>& dilations,
                    MetaTensor* out);
 
+void FractionalMaxPoolInferMeta(const MetaTensor& x,
+                                const std::vector<int>& output_size,
+                                const std::vector<int>& kernel_size,
+                                float random_u,
+                                bool return_mask,
+                                MetaTensor* out,
+                                MetaTensor* mask,
+                                MetaConfig config = MetaConfig());
+
 void FrameInferMeta(const MetaTensor& x,
                     int frame_length,
                     int hop_length,
@@ -350,15 +359,6 @@ void MaxPoolWithIndexInferMeta(const MetaTensor& x,
                               MetaTensor* mask,
                               MetaConfig config = MetaConfig());
 
-void FractionalMaxPoolWithIndexInferMeta(const MetaTensor& x,
-                                         const std::vector<int>& output_size,
-                                         const std::vector<int>& kernel_size,
-                                         float random_u,
-                                         bool return_mask,
-                                         MetaTensor* out,
-                                         MetaTensor* mask,
-                                         MetaConfig config = MetaConfig());
-
 void MaxPoolV2InferMeta(const MetaTensor& x,
                         const std::vector<int>& kernel_size,
                         const std::vector<int>& strides,

paddle/phi/kernels/cpu/pool_grad_kernel.cc

Lines changed: 2 additions & 2 deletions
@@ -48,7 +48,7 @@ PD_REGISTER_KERNEL(max_pool3d_with_index_grad,
 PD_REGISTER_KERNEL(fractional_max_pool2d_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool2dWithIndexGradKernel,
+                   phi::FractionalMaxPool2dGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {
@@ -58,7 +58,7 @@ PD_REGISTER_KERNEL(fractional_max_pool2d_grad,
 PD_REGISTER_KERNEL(fractional_max_pool3d_grad,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool3dWithIndexGradKernel,
+                   phi::FractionalMaxPool3dGradKernel,
                    float,
                    double,
                    phi::dtype::float16) {

paddle/phi/kernels/cpu/pool_kernel.cc

Lines changed: 2 additions & 2 deletions
@@ -40,7 +40,7 @@ PD_REGISTER_KERNEL(max_pool3d_with_index,
 PD_REGISTER_KERNEL(fractional_max_pool2d,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool2dWithIndexKernel,
+                   phi::FractionalMaxPool2dKernel,
                    float,
                    double,
                    phi::dtype::float16) {
@@ -50,7 +50,7 @@ PD_REGISTER_KERNEL(fractional_max_pool2d,
 PD_REGISTER_KERNEL(fractional_max_pool3d,
                    CPU,
                    ALL_LAYOUT,
-                   phi::FractionalMaxPool3dWithIndexKernel,
+                   phi::FractionalMaxPool3dKernel,
                    float,
                    double,
                    phi::dtype::float16) {
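
In both CPU registration files only the phi::...Kernel symbol changes; the op name passed as the first argument of PD_REGISTER_KERNEL (fractional_max_pool2d, fractional_max_pool3d) stays the same, so nothing that looks kernels up by name is affected. The generic, simplified sketch below illustrates that idea; it is not Paddle's actual PD_REGISTER_KERNEL machinery, and Registry, Kernel, and the dummy kernel body are made up purely for this illustration.

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Toy registry: a stable string op name maps to whatever callable is
// registered under it, independent of that callable's C++ name.
using Kernel = std::function<void()>;

std::map<std::string, Kernel>& Registry() {
  static std::map<std::string, Kernel> registry;
  return registry;
}

// Stand-in for the renamed kernel entry point (hypothetical body).
void FractionalMaxPool2dKernel() {
  std::cout << "run fractional_max_pool2d\n";
}

int main() {
  // The lookup key stays "fractional_max_pool2d"; only the C++ symbol
  // registered under it was renamed by this commit.
  Registry()["fractional_max_pool2d"] = FractionalMaxPool2dKernel;
  Registry()["fractional_max_pool2d"]();  // dispatch by op name
  return 0;
}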

paddle/phi/kernels/funcs/pooling.cc

Lines changed: 16 additions & 24 deletions
@@ -1850,7 +1850,7 @@ template class MaxPool3dWithIndexGradFunctor<CPUContext, double, int>;
  * All tensors are in NCHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool2dWithIndexFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool2dFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& input,
@@ -1955,7 +1955,7 @@ class FractionalMaxPool2dWithIndexFunctor<CPUContext, T1, T2> {
  * All tensors are in NCHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool2dGradFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
@@ -1996,22 +1996,18 @@ class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, T1, T2> {
   }
 };
 
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext, float, int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, float, int>;
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext, double, int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext, double, int>;
-template class FractionalMaxPool2dWithIndexFunctor<CPUContext,
-                                                   dtype::float16,
-                                                   int>;
-template class FractionalMaxPool2dWithIndexGradFunctor<CPUContext,
-                                                       dtype::float16,
-                                                       int>;
+template class FractionalMaxPool2dFunctor<CPUContext, float, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, float, int>;
+template class FractionalMaxPool2dFunctor<CPUContext, double, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, double, int>;
+template class FractionalMaxPool2dFunctor<CPUContext, dtype::float16, int>;
+template class FractionalMaxPool2dGradFunctor<CPUContext, dtype::float16, int>;
 
 /*
  * All tensors are in NCDHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool3dWithIndexFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool3dFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& input,
@@ -2143,7 +2139,7 @@ class FractionalMaxPool3dWithIndexFunctor<CPUContext, T1, T2> {
  * All tensors are in NCDHW format.
  */
 template <typename T1, typename T2>
-class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
+class FractionalMaxPool3dGradFunctor<CPUContext, T1, T2> {
  public:
   void operator()(const CPUContext& context,
                   const DenseTensor& output_grad,
@@ -2189,16 +2185,12 @@ class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, T1, T2> {
   }
 };
 
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext, float, int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, float, int>;
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext, double, int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext, double, int>;
-template class FractionalMaxPool3dWithIndexFunctor<CPUContext,
-                                                   dtype::float16,
-                                                   int>;
-template class FractionalMaxPool3dWithIndexGradFunctor<CPUContext,
-                                                       dtype::float16,
-                                                       int>;
+template class FractionalMaxPool3dFunctor<CPUContext, float, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, float, int>;
+template class FractionalMaxPool3dFunctor<CPUContext, double, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, double, int>;
+template class FractionalMaxPool3dFunctor<CPUContext, dtype::float16, int>;
+template class FractionalMaxPool3dGradFunctor<CPUContext, dtype::float16, int>;
 
 }  // namespace funcs
 }  // namespace phi
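
The blocks of "template class" lines above are explicit instantiation definitions, so they have to be renamed in lockstep with the partial specializations they instantiate; otherwise this translation unit stops emitting the symbols the kernels link against. A minimal, generic C++ sketch of that pattern follows; CPUContext here is a placeholder tag and the operator() body is a dummy, not the real pooling functor.

#include <vector>

struct CPUContext {};  // placeholder device-context tag for the sketch

// Primary template, declared but never defined.
template <typename Context, typename T1, typename T2>
class FractionalMaxPool2dFunctor;

// Partial specialization for the CPU context, mirroring pooling.cc.
template <typename T1, typename T2>
class FractionalMaxPool2dFunctor<CPUContext, T1, T2> {
 public:
  void operator()(const std::vector<T1>& input, std::vector<T2>* mask) const {
    mask->assign(input.size(), T2{0});  // dummy body for the sketch
  }
};

// Explicit instantiation definitions: renaming the class template means
// renaming each of these lines too, exactly as the hunks above do.
template class FractionalMaxPool2dFunctor<CPUContext, float, int>;
template class FractionalMaxPool2dFunctor<CPUContext, double, int>;

int main() {
  FractionalMaxPool2dFunctor<CPUContext, float, int> pool2d;
  std::vector<float> input{1.0f, 2.0f, 3.0f};
  std::vector<int> mask;
  pool2d(input, &mask);  // uses the explicitly instantiated specialization
  return 0;
}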
