Update error info of ops, add some test cases for raise messages (#23750)

1. Update the error info of the ops (abs, acos, asin, atan, ceil, cos, exp, floor, log, pow, reciprocal, round, rsqrt, sin, sqrt, square, tanh).
2. Add unit tests for the error info of the ops referred to above (a sketch of the test pattern is shown below).
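As a rough illustration of the pattern these new tests follow (not part of the diff; the class name and the choice of sqrt are only examples), using the fluid.layers API exercised throughout this change:

    import unittest
    import paddle.fluid as fluid
    from paddle.fluid import Program, program_guard

    class TestSqrtRaise(unittest.TestCase):  # hypothetical example class
        def test_errors(self):
            with program_guard(Program(), Program()):
                # sqrt only accepts float16/float32/float64, so an int32
                # input should fail the dtype check with a TypeError.
                x_int32 = fluid.layers.data(
                    name='x_int32', shape=[12, 10], dtype='int32')
                self.assertRaises(TypeError, fluid.layers.sqrt, x_int32)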
Steffy-zxf authored 5 years ago · committed by GitHub
parent 17588bbeed
commit ac4da77aa6

File diff suppressed because it is too large.

@@ -23,10 +23,9 @@ class SignOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SignOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SignOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "sign");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "sign");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }

@@ -256,8 +256,15 @@ def generate_activation_fn(op_type):
             op = getattr(core.ops, op_type)
             return op(x)
 
-        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                                 op_type)
+        if op_type not in ["abs", "exp", "square"]:
+            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                     op_type)
+        else:
+            # abs exp square ops support dtype(int32, int64, float16, float32, float64)
+            check_variable_and_dtype(
+                x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
+                op_type)
 
         helper = LayerHelper(op_type, **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
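In effect, abs, exp, and square now also accept integer inputs, while the remaining activations keep the float-only check. A minimal sketch of the resulting behavior (variable names here are illustrative):

    import paddle.fluid as fluid

    x_int = fluid.layers.data(name='x_int', shape=[3], dtype='int32')

    # abs/exp/square accept int32/int64 in addition to the float dtypes.
    y = fluid.layers.abs(x_int)

    # Other activations such as sqrt still require float16/float32/float64,
    # so the following would raise a TypeError:
    # fluid.layers.sqrt(x_int)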

@@ -8183,6 +8183,7 @@ def log(x, name=None):
     if in_dygraph_mode():
         return core.ops.log(x)
 
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
     inputs = {'X': [x]}
     helper = LayerHelper('log', **locals())
     dtype = helper.input_dtype(input_param_name='x')
@@ -8938,10 +8939,14 @@ def pow(x, factor=1.0, name=None):
             y_2 = fluid.layers.pow(x, factor=factor_tensor)
             # y_2 is x^{3.0}
     """
+    check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
+                             'pow')
+
     helper = LayerHelper('pow', **locals())
     inputs = {'X': x}
     attrs = {}
     if isinstance(factor, Variable):
+        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
         factor.stop_gradient = True
         inputs['FactorTensor'] = factor
     else:
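The two checks added above mean that x may be int32/int64/float32/float64, while a factor passed as a tensor must be float32. A hedged usage sketch (names are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[1], dtype='float32')

    # Accepted: the factor tensor is float32.
    factor_ok = fluid.layers.fill_constant([1], "float32", 3.0)
    y = fluid.layers.pow(x, factor=factor_ok)

    # Rejected: a float64 factor tensor fails the 'factor' dtype check.
    # factor_bad = fluid.layers.fill_constant([1], "float64", 3.0)
    # fluid.layers.pow(x, factor=factor_bad)  # would raise TypeError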

@@ -765,6 +765,15 @@ class TestLog(TestActivation):
             return
         self.check_grad(['X'], 'Out')
 
+    def test_error(self):
+        in1 = fluid.layers.data(
+            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
+        in2 = fluid.layers.data(
+            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
+
+        self.assertRaises(TypeError, fluid.layers.log, in1)
+        self.assertRaises(TypeError, fluid.layers.log, in2)
+
 
 class TestSquare(TestActivation):
     def setUp(self):
@@ -856,6 +865,29 @@ class TestPow_factor_tensor(TestActivation):
         assert np.array_equal(res_3, res)
         assert np.array_equal(res_6, np.power(input, 3))
 
+    def test_error(self):
+        in1 = fluid.layers.data(
+            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
+        in2 = fluid.layers.data(
+            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
+        in3 = fluid.layers.data(
+            name="in3",
+            shape=[11, 17],
+            append_batch_size=False,
+            dtype="float32")
+        in4 = fluid.layers.data(
+            name="in4",
+            shape=[11, 17],
+            append_batch_size=False,
+            dtype="float64")
+
+        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)
+
+        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)
+
 
 class TestSTanh(TestActivation):
     def setUp(self):
@@ -1035,6 +1067,39 @@ class TestSwishOpError(unittest.TestCase):
             fluid.layers.swish(x_fp16)
 
 
+#------------------ Test Error Activation----------------------
+def create_test_error_class(op_type):
+    class TestOpErrors(unittest.TestCase):
+        def test_errors(self):
+            with program_guard(Program(), Program()):
+                op = getattr(fluid.layers, op_type)
+                # The input dtype of op_type must be float32, float64.
+                in1 = fluid.layers.data(
+                    name='input2', shape=[12, 10], dtype="int32")
+                in2 = fluid.layers.data(
+                    name='input3', shape=[12, 10], dtype="int64")
+                self.assertRaises(TypeError, op, in1)
+                self.assertRaises(TypeError, op, in2)
+
+    cls_name = "{0}_{1}".format(op_type, "test_errors")
+    TestOpErrors.__name__ = cls_name
+    globals()[cls_name] = TestOpErrors
+
+
+create_test_error_class('acos')
+create_test_error_class('asin')
+create_test_error_class('atan')
+create_test_error_class('ceil')
+create_test_error_class('cos')
+create_test_error_class('floor')
+create_test_error_class('reciprocal')
+create_test_error_class('round')
+create_test_error_class('rsqrt')
+create_test_error_class('sin')
+create_test_error_class('sqrt')
+create_test_error_class('tanh')
+
+
 #------------------ Test Cudnn Activation----------------------
 def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
     @unittest.skipIf(not core.is_compiled_with_cuda(),
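For reference, each create_test_error_class call above registers a generated test class in the module's globals; the one for 'acos', for example, behaves roughly like this hand-written equivalent (a sketch, not part of the diff, using the same imports as the test file):

    class acos_test_errors(unittest.TestCase):
        def test_errors(self):
            with program_guard(Program(), Program()):
                # int32/int64 inputs must raise TypeError for acos.
                in1 = fluid.layers.data(
                    name='input2', shape=[12, 10], dtype="int32")
                in2 = fluid.layers.data(
                    name='input3', shape=[12, 10], dtype="int64")
                self.assertRaises(TypeError, fluid.layers.acos, in1)
                self.assertRaises(TypeError, fluid.layers.acos, in2)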

@@ -45,10 +45,13 @@ class TestSignOpError(unittest.TestCase):
             # The input dtype of sign_op must be float16, float32, float64.
             input2 = fluid.layers.data(
                 name='input2', shape=[12, 10], dtype="int32")
-            self.assertRaises(TypeError, fluid.layers.sign, input2)
             input3 = fluid.layers.data(
-                name='input3', shape=[4], dtype="float16")
-            fluid.layers.sign(input3)
+                name='input3', shape=[12, 10], dtype="int64")
+            self.assertRaises(TypeError, fluid.layers.sign, input2)
+            self.assertRaises(TypeError, fluid.layers.sign, input3)
+            input4 = fluid.layers.data(
+                name='input4', shape=[4], dtype="float16")
+            fluid.layers.sign(input4)
 
 
 if __name__ == "__main__":
