
Commit 245046a

[Hackathon 5th No.13][Related PR] Added int support for sign - Part (#58255)

* ♻️ Refactor: added sign int type support
* ✏️ Fix: typo

1 parent 29efd9d · commit 245046a
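For context, a minimal dygraph sketch (not part of the commit) of what this change enables: paddle.sign now accepts integer tensors on builds that include the kernel registrations below. The tensor values are illustrative only.

import numpy as np
import paddle

# With the int8/int16/int32/int64 kernels registered by this commit,
# sign works on integer tensors; earlier builds rejected these dtypes.
x = paddle.to_tensor(np.array([-3, 0, 7], dtype="int32"))
y = paddle.sign(x)
print(y.numpy())  # [-1  0  1], same int32 dtype as the input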

File tree: 6 files changed, 86 additions and 41 deletions.

paddle/phi/kernels/cpu/sign_kernel.cc (10 additions, 1 deletion)

@@ -21,4 +21,13 @@ limitations under the License. */
 // See Note [ Why still include the fluid headers? ]
 #include "paddle/phi/common/bfloat16.h"
 
-PD_REGISTER_KERNEL(sign, CPU, ALL_LAYOUT, phi::SignKernel, float, double) {}
+PD_REGISTER_KERNEL(sign,
+                   CPU,
+                   ALL_LAYOUT,
+                   phi::SignKernel,
+                   int8_t,
+                   int16_t,
+                   int32_t,
+                   int64_t,
+                   float,
+                   double) {}

paddle/phi/kernels/funcs/eigen/sign.cc (4 additions, 0 deletions)

@@ -29,6 +29,10 @@ struct EigenSign<Eigen::DefaultDevice, T> {
   }
 };
 
+template struct EigenSign<Eigen::DefaultDevice, int8_t>;
+template struct EigenSign<Eigen::DefaultDevice, int16_t>;
+template struct EigenSign<Eigen::DefaultDevice, int32_t>;
+template struct EigenSign<Eigen::DefaultDevice, int64_t>;
 template struct EigenSign<Eigen::DefaultDevice, float>;
 template struct EigenSign<Eigen::DefaultDevice, double>;

paddle/phi/kernels/funcs/eigen/sign.cu (4 additions, 0 deletions)

@@ -29,6 +29,10 @@ struct EigenSign<Eigen::GpuDevice, T> {
   }
 };
 
+template struct EigenSign<Eigen::GpuDevice, int8_t>;
+template struct EigenSign<Eigen::GpuDevice, int16_t>;
+template struct EigenSign<Eigen::GpuDevice, int32_t>;
+template struct EigenSign<Eigen::GpuDevice, int64_t>;
 template struct EigenSign<Eigen::GpuDevice, float>;
 template struct EigenSign<Eigen::GpuDevice, double>;
 template struct EigenSign<Eigen::GpuDevice, dtype::float16>;

paddle/phi/kernels/gpu/sign_kernel.cu.cc (4 additions, 0 deletions)

@@ -25,6 +25,10 @@ PD_REGISTER_KERNEL(sign,
                    GPU,
                    ALL_LAYOUT,
                    phi::SignKernel,
+                   int8_t,
+                   int16_t,
+                   int32_t,
+                   int64_t,
                    float,
                    double,
                    phi::dtype::float16,

python/paddle/tensor/math.py (14 additions, 2 deletions)

@@ -4565,7 +4565,7 @@ def sign(x, name=None):
     Returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
 
     Args:
-        x (Tensor): The input tensor. The data type can be float16, float32 or float64.
+        x (Tensor): The input tensor. The data type can be int8, int16, int32, int64, float16, float32 or float64.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
 
     Returns:

@@ -4586,7 +4586,19 @@ def sign(x, name=None):
         return _C_ops.sign(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'sign'
+            x,
+            'x',
+            [
+                'int8',
+                'int16',
+                'int32',
+                'int64',
+                'float16',
+                'float32',
+                'float64',
+                'uint16',
+            ],
+            'sign',
         )
         helper = LayerHelper("sign", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
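A hedged sketch (not from the commit) of how the widened check_variable_and_dtype list behaves on the static-graph path: integer inputs now pass the check and a sign op is appended to the program instead of raising TypeError. Variable names and shapes are illustrative; the commit's own coverage is in test_sign_op.py below.

import paddle

paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
    # int64 is now in the allowed dtype list, so this no longer raises.
    x = paddle.static.data(name="x", shape=[3], dtype="int64")
    out = paddle.sign(x)  # appended via LayerHelper on the static path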

test/legacy_test/test_sign_op.py (50 additions, 38 deletions)

@@ -76,28 +76,12 @@ def test_check_grad(self):
         self.check_grad_with_place(self.place, ['X'], 'Out')
 
 
-class TestSignOpError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program(), Program()):
-            # The input type of sign_op must be Variable or numpy.ndarray.
-            input1 = 12
-            self.assertRaises(TypeError, paddle.sign, input1)
-            # The input dtype of sign_op must be float16, float32, float64.
-            input2 = paddle.static.data(
-                name='input2', shape=[-1, 12, 10], dtype="int32"
-            )
-            input3 = paddle.static.data(
-                name='input3', shape=[-1, 12, 10], dtype="int64"
-            )
-            self.assertRaises(TypeError, paddle.sign, input2)
-            self.assertRaises(TypeError, paddle.sign, input3)
-            input4 = paddle.static.data(
-                name='input4', shape=[-1, 4], dtype="float16"
-            )
-            paddle.sign(input4)
-
-
 class TestSignAPI(unittest.TestCase):
+    def setUp(self):
+        self.place = [base.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            self.place.append(base.CUDAPlace(0))
+
     def test_dygraph(self):
         with base.dygraph.guard():
             np_x = np.array([-1.0, 0.0, -0.0, 1.2, 1.5], dtype='float64')

@@ -108,23 +92,51 @@ def test_dygraph(self):
             self.assertEqual((np_z == z_expected).all(), True)
 
     def test_static(self):
-        with program_guard(Program(), Program()):
-            # The input type of sign_op must be Variable or numpy.ndarray.
-            input1 = 12
-            self.assertRaises(TypeError, paddle.tensor.math.sign, input1)
-            # The input dtype of sign_op must be float16, float32, float64.
-            input2 = paddle.static.data(
-                name='input2', shape=[-1, 12, 10], dtype="int32"
-            )
-            input3 = paddle.static.data(
-                name='input3', shape=[-1, 12, 10], dtype="int64"
-            )
-            self.assertRaises(TypeError, paddle.tensor.math.sign, input2)
-            self.assertRaises(TypeError, paddle.tensor.math.sign, input3)
-            input4 = paddle.static.data(
-                name='input4', shape=[-1, 4], dtype="float16"
-            )
-            paddle.sign(input4)
+        np_input2 = np.random.uniform(-10, 10, (12, 10)).astype("int16")
+        np_input3 = np.random.uniform(-10, 10, (12, 10)).astype("int32")
+        np_input4 = np.random.uniform(-10, 10, (12, 10)).astype("int64")
+        np_out2 = np.sign(np_input2)
+        np_out3 = np.sign(np_input3)
+        np_out4 = np.sign(np_input4)
+
+        def run(place):
+            with program_guard(Program(), Program()):
+                # The input type of sign_op must be Variable or numpy.ndarray.
+                input1 = 12
+                self.assertRaises(TypeError, paddle.tensor.math.sign, input1)
+                # The result of sign_op must correct.
+                input2 = paddle.static.data(
+                    name='input2', shape=[12, 10], dtype="int16"
+                )
+                input3 = paddle.static.data(
+                    name='input3', shape=[12, 10], dtype="int32"
+                )
+                input4 = paddle.static.data(
+                    name='input4', shape=[12, 10], dtype="int64"
+                )
+                out2 = paddle.sign(input2)
+                out3 = paddle.sign(input3)
+                out4 = paddle.sign(input4)
+                exe = paddle.static.Executor(place)
+                res2, res3, res4 = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={
+                        "input2": np_input2,
+                        "input3": np_input3,
+                        "input4": np_input4,
+                    },
+                    fetch_list=[out2, out3, out4],
+                )
+                self.assertEqual((res2 == np_out2).all(), True)
+                self.assertEqual((res3 == np_out3).all(), True)
+                self.assertEqual((res4 == np_out4).all(), True)
+                input5 = paddle.static.data(
+                    name='input5', shape=[-1, 4], dtype="float16"
+                )
+                paddle.sign(input5)
+
+        for place in self.place:
+            run(place)
 
 
 class TestSignDoubleGradCheck(unittest.TestCase):
