@@ -18,21 +18,6 @@ class TestExp(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)
 
 
-class TestRelu(OpTest):
-    def setUp(self):
-        self.op_type = "relu"
-        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
-        x = np.sign(x) * np.exp(np.abs(x))
-        self.inputs = {'X': x}
-        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
-
-
 class TestSigmoid(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
@@ -81,8 +66,12 @@ class TestSqrt(OpTest):
 class TestAbs(OpTest):
     def setUp(self):
         self.op_type = "abs"
-        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
-        x = np.sign(x) * np.exp(np.abs(x))
+        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
+        # Because we set delta = 0.005 in calculating the numeric gradient,
+        # if x is too small, such as 0.002, x_neg will be -0.003 and
+        # x_pos will be 0.007, so the numeric gradient is inaccurate.
+        # We should avoid this.
+        x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
         self.outputs = {'Y': np.abs(self.inputs['X'])}
 
@@ -93,41 +82,14 @@ class TestAbs(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)
 
 
-class TestReciprocal(OpTest):
-    def setUp(self):
-        self.op_type = "reciprocal"
-        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
-        self.outputs = {'Y': np.reciprocal(self.inputs['X'])}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.01)
-
-
-class TestLog(OpTest):
-    def setUp(self):
-        self.op_type = "log"
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
-        }
-        self.outputs = {'Y': np.log(self.inputs['X'])}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
-
-
-class TestSquare(OpTest):
+class TestRelu(OpTest):
     def setUp(self):
-        self.op_type = "square"
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
-        }
-        self.outputs = {'Y': np.square(self.inputs['X'])}
+        self.op_type = "relu"
+        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
+        # The same reason as in TestAbs
+        x[np.abs(x) < 0.005] = 0.02
+        self.inputs = {'X': x}
+        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
 
     def test_check_output(self):
         self.check_output()
@@ -140,10 +102,13 @@ class TestBRelu(OpTest):
     def setUp(self):
         self.op_type = "brelu"
         x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
-        x = 2 * np.sign(x) * np.exp(np.abs(x))
-        self.inputs = {'X': x}
-        t_min = 0
+        t_min = 1
         t_max = 4
+        # The same reason as in TestAbs
+        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
+        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
+
+        self.inputs = {'X': x}
         self.attrs = {'t_min': t_min, 't_max': t_max}
         t = np.copy(x)
         t[t < t_min] = t_min
@@ -160,10 +125,12 @@ class TestBRelu(OpTest):
 class TestSoftRelu(OpTest):
     def setUp(self):
         self.op_type = "soft_relu"
-        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
-        x = 2 * np.sign(x) * np.exp(np.abs(x))
+        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
+        threshold = 2
+        # The same reason as in TestAbs
+        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
+        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
         self.inputs = {'X': x}
-        threshold = 4
         self.attrs = {'threshold': threshold}
         t = np.copy(x)
         t[t < -threshold] = -threshold
@@ -177,6 +144,49 @@ class TestSoftRelu(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.02)
 
 
+class TestReciprocal(OpTest):
+    def setUp(self):
+        self.op_type = "reciprocal"
+        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
+        self.outputs = {'Y': np.reciprocal(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.01)
+
+
+class TestLog(OpTest):
+    def setUp(self):
+        self.op_type = "log"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {'Y': np.log(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
+class TestSquare(OpTest):
+    def setUp(self):
+        self.op_type = "square"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {'Y': np.square(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
 class TestPow(OpTest):
     def setUp(self):
         self.op_type = "pow"
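
Aside on the comment added in TestAbs: with delta = 0.005, the numeric gradient is a central difference at x - delta and x + delta, so any x with |x| < delta puts the two sample points on opposite sides of the kink of abs, which is exactly what the x_neg/x_pos example in the comment describes. A minimal NumPy sketch of that effect; the numeric_grad helper here is hypothetical, written to mirror that scheme, and is not part of the OpTest API:

import numpy as np

def numeric_grad(f, x, delta=0.005):
    # Hypothetical central-difference estimate: (f(x + d) - f(x - d)) / (2 d).
    return (f(x + delta) - f(x - delta)) / (2 * delta)

print(numeric_grad(np.abs, 0.5))    # ~1.0: exact away from the kink at 0
# For x = 0.002, the samples are x_neg = -0.003 and x_pos = 0.007, giving
# (0.007 - 0.003) / 0.01 = 0.4 instead of the true gradient 1.0.
print(numeric_grad(np.abs, 0.002))  # ~0.4

Nudging any |x| < 0.005 up to 0.02, as the tests do, keeps both sample points on the same side of the kink, so the relative-error tolerance of check_grad stays meaningful.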