@@ -22,7 +22,7 @@ from scipy.special import expit, erf
 import paddle
 import paddle.fluid as fluid
 import paddle.nn as nn
-import paddle.nn.functional as functional
+import paddle.nn.functional as F
 from paddle.fluid import compiler, Program, program_guard
@@ -344,6 +344,12 @@ class TestTanhShrink(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
+def ref_hardshrink(x, threshold):
+    out = np.copy(x)
+    out[(out >= -threshold) & (out <= threshold)] = 0
+    return out
+
+
 class TestHardShrink(TestActivation):
     def setUp(self):
         self.op_type = "hard_shrink"
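Reviewer note: the hunk above factors the NumPy reference for hardshrink into a shared `ref_hardshrink` helper, so the op test and the new API tests below compare against the same ground truth. A minimal standalone sketch of its semantics (plain NumPy, no Paddle needed): values whose magnitude exceeds the threshold pass through unchanged, and everything in `[-threshold, threshold]` is zeroed.

```python
import numpy as np

def ref_hardshrink(x, threshold):
    # Copy so the caller's array is untouched, then zero the shrunk band.
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out

x = np.array([-0.9, -0.5, 0.3, 0.5, 1.2], dtype='float32')
print(ref_hardshrink(x, 0.5))  # [-0.9  0.  0.  0.  1.2] (band edges zeroed too)
```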
@@ -351,11 +357,10 @@ class TestHardShrink(TestActivation):
 
         threshold = 0.5
         x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
-        out = np.copy(x)
-        out[(out >= -threshold) & (out <= threshold)] = 0
+        out = ref_hardshrink(x, threshold)
 
-        self.attrs = {'lambda': threshold}
-        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'threshold': threshold}
+        self.inputs = {'X': x}
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
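The hunk above is output-preserving: the inlined copy-and-mask computation is replaced by a call to the helper, which runs the identical NumPy code, as the quick equivalence check below illustrates (a sketch reusing `ref_hardshrink` from above). The separate `'lambda'` → `'threshold'` attribute rename and the dropped `OpTest.np_dtype_to_fluid_dtype` wrapper assume the C++ operator and test framework were updated accordingly elsewhere in this PR.

```python
import numpy as np

x = np.random.uniform(-1, 1, [10, 12]).astype('float32') * 10

# Old inlined computation ...
old = np.copy(x)
old[(old >= -0.5) & (old <= 0.5)] = 0

# ... produces exactly the same array as the new shared helper.
assert np.array_equal(old, ref_hardshrink(x, 0.5))
```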
@@ -364,17 +369,62 @@ class TestHardShrink(TestActivation):
         self.check_grad(['X'], 'Out')
 
 
-class TestHardShrinkOpError(unittest.TestCase):
+class TestHardShrinkAPI(unittest.TestCase):
+    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
+    def setUp(self):
+        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
+        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
+            else paddle.CPUPlace()
+
+    def test_static_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', [10, 12])
+            out1 = F.hardshrink(x)
+            hd = paddle.nn.Hardshrink()
+            out2 = hd(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
+        out_ref = ref_hardshrink(self.x_np, 0.5)
+        for r in res:
+            self.assertEqual(np.allclose(out_ref, r), True)
+
+    def test_dygraph_api(self):
+        paddle.disable_static(self.place)
+        x = paddle.to_variable(self.x_np)
+        out1 = F.hardshrink(x)
+        hd = paddle.nn.Hardshrink()
+        out2 = hd(x)
+        out_ref = ref_hardshrink(self.x_np, 0.5)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+
+        out1 = F.hardshrink(x, 0.6)
+        hd = paddle.nn.Hardshrink(0.6)
+        out2 = hd(x)
+        out_ref = ref_hardshrink(self.x_np, 0.6)
+        for r in [out1, out2]:
+            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+        paddle.enable_static()
+
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program()):
+            x = fluid.data('X', [10, 12])
+            out = fluid.layers.hard_shrink(x)
+            exe = fluid.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = ref_hardshrink(self.x_np, 0.5)
+        self.assertEqual(np.allclose(out_ref, res[0]), True)
+
     def test_errors(self):
-        with program_guard(Program()):
+        with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
-            self.assertRaises(TypeError, fluid.layers.hard_shrink, 1)
+            self.assertRaises(TypeError, F.hardshrink, 1)
             # The input dtype must be float16, float32, float64.
-            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
-            self.assertRaises(TypeError, fluid.layers.hard_shrink, x_int32)
+            x_int32 = paddle.data(name='x_int32', shape=[12, 10], dtype='int32')
+            self.assertRaises(TypeError, F.hardshrink, x_int32)
             # support the input dtype is float16
-            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
-            fluid.layers.hard_shrink(x_fp16)
+            x_fp16 = paddle.data(name='x_fp16', shape=[12, 10], dtype='float16')
+            F.hardshrink(x_fp16)
 
 
 class TestSoftShrink(TestActivation):
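For readers new to the 2.0-style API exercised by `TestHardShrinkAPI`, here is a minimal dygraph usage sketch. The names (`paddle.disable_static`, `paddle.to_variable`, `F.hardshrink`, `paddle.nn.Hardshrink`) are taken from the diff itself and reflect the 2.0-alpha API of this era, not necessarily current Paddle.

```python
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_variable(np.array([-1.0, -0.3, 0.4, 2.0], dtype='float32'))

y1 = F.hardshrink(x)            # functional form, default threshold=0.5
y2 = paddle.nn.Hardshrink()(x)  # layer form, same default

# Both zero out the band [-0.5, 0.5]: [-1., 0., 0., 2.]
assert np.allclose(y1.numpy(), y2.numpy())
paddle.enable_static()
```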
@@ -1435,7 +1485,7 @@ class TestNNFunctionalReluAPI(unittest.TestCase):
         main_program = Program()
         with fluid.program_guard(main_program):
             x = fluid.data(name='x', shape=self.x_shape)
-            y = functional.relu(x)
+            y = F.relu(x)
             exe = fluid.Executor(fluid.CPUPlace())
             out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
         self.assertTrue(np.allclose(out[0], self.y))
@@ -1501,7 +1551,7 @@ class TestNNFunctionalSigmoidAPI(unittest.TestCase):
         main_program = Program()
         with fluid.program_guard(main_program):
             x = fluid.data(name='x', shape=self.x_shape)
-            y = functional.sigmoid(x)
+            y = F.sigmoid(x)
             exe = fluid.Executor(fluid.CPUPlace())
             out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y])
         self.assertTrue(np.allclose(out[0], self.y))
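The last two hunks are mechanical fallout of the import change in the first hunk: with `paddle.nn.functional` now imported as `F`, `functional.relu(x)` and `functional.sigmoid(x)` become `F.relu(x)` and `F.sigmoid(x)`. Both aliases name the same module object, so there is no behavior change, as the sketch below checks.

```python
import paddle.nn.functional as F
import paddle.nn.functional as functional  # the old alias; same module object

assert functional is F
assert functional.relu is F.relu and functional.sigmoid is F.sigmoid
```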