update error info of ops, add test cases for raised error messages (#23750)

1. update the error info of the following ops: abs, acos, asin, atan, ceil, cos, exp, floor, log, pow, reciprocal, round, rsqrt, sin, sqrt, square, tanh
2. add unit tests for the ops referred to above (checking the raised error messages)
revert-23830-2.0-beta
Steffy-zxf authored 6 years ago, committed by GitHub
parent 17588bbeed
commit ac4da77aa6
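
For orientation before the diff: after this change, the affected layers validate input dtypes up front, so an unsupported dtype fails fast at graph-construction time with a TypeError instead of surfacing as a vague runtime error. A minimal sketch of that behavior (illustrative, not part of the patch):

import paddle.fluid as fluid

# sqrt is one of the ops covered by this patch; it accepts only
# float16/float32/float64, so an int32 input is rejected up front.
x_int = fluid.layers.data(
    name="x_int", shape=[11, 17], append_batch_size=False, dtype="int32")
try:
    fluid.layers.sqrt(x_int)
except TypeError as e:
    print("rejected as expected:", e)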

paddle/fluid/operators/activation_op.h

@@ -27,6 +27,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/blas.h"
+#include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/float16.h"
 
 #ifdef PADDLE_WITH_MKLDNN
@@ -53,12 +54,14 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context,
                                     framework::Tensor** Out) {
   auto x_var = context.InputVar("X");
   auto out_var = context.OutputVar("Out");
-  PADDLE_ENFORCE(x_var != nullptr,
-                 "Cannot get input Variable X, variable name = %s",
-                 context.InputName("X"));
-  PADDLE_ENFORCE(out_var != nullptr,
-                 "Cannot get output Variable Out, variable name = %s",
-                 context.OutputName("Out"));
+  PADDLE_ENFORCE_NOT_NULL(x_var,
+                          platform::errors::NotFound(
+                              "Cannot get input Variable X, variable name = %s",
+                              context.InputName("X")));
+  PADDLE_ENFORCE_NOT_NULL(
+      out_var, platform::errors::NotFound(
+                   "Cannot get output Variable Out, variable name = %s",
+                   context.OutputName("Out")));
   if (CanBeUsedBySelectedRows.count(context.Type())) {
     *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
     *Out = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar(
@@ -68,9 +71,10 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context,
     *Out = context.Output<framework::Tensor>("Out");
   }
-  PADDLE_ENFORCE(*Out != nullptr,
-                 "Cannot get output tensor Out, variable name = %s",
-                 context.OutputName("Out"));
+  PADDLE_ENFORCE_NOT_NULL(*Out, platform::errors::NotFound(
+                                    "Cannot get the tensor from the Variable "
+                                    "Output(Out), variable name = %s",
+                                    context.OutputName("Out")));
 }
 
 template <ActBwdOpFwdDeps kDepValue>
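
The hunks above establish the pattern repeated throughout this file: generic PADDLE_ENFORCE(ptr != nullptr, msg, ...) calls become PADDLE_ENFORCE_NOT_NULL(ptr, platform::errors::NotFound(msg, ...)), so the null check is explicit and the message carries a typed error category. A rough Python analogy of the semantics (illustrative only; the real macros are C++):

class NotFoundError(Exception):
    """Stands in for platform::errors::NotFound."""

def enforce_not_null(ptr, error):
    # PADDLE_ENFORCE_NOT_NULL(ptr, error) raises if ptr is null,
    # carrying the pre-built typed error instead of a bare format string.
    if ptr is None:
        raise error

x_var = None
enforce_not_null(
    x_var, NotFoundError("Cannot get input Variable X, variable name = %s" % "x"))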
@@ -84,18 +88,22 @@ inline void ExtractActivationGradTensor(
   if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
     out_var = context.InputVar("Out");
-    PADDLE_ENFORCE(out_var != nullptr,
-                   "Cannot get input Variable Out, variable name = %s",
-                   context.InputName("Out"));
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var, platform::errors::NotFound(
+                     "Cannot get input Variable Out, variable name = %s",
+                     context.InputName("Out")));
   }
-  PADDLE_ENFORCE(out_grad_var != nullptr,
-                 "Cannot get input Variable %s, variable name = %s",
-                 framework::GradVarName("Out"),
-                 context.InputName(framework::GradVarName("Out")));
-  PADDLE_ENFORCE(x_grad_var != nullptr,
-                 "Cannot get output Variable %s, variable name = %s",
-                 framework::GradVarName("X"),
-                 context.OutputName(framework::GradVarName("X")));
+  PADDLE_ENFORCE_NOT_NULL(
+      out_grad_var, platform::errors::NotFound(
+                        "Cannot get input Variable %s, variable name = %s",
+                        framework::GradVarName("Out"),
+                        context.InputName(framework::GradVarName("Out"))));
+  PADDLE_ENFORCE_NOT_NULL(
+      x_grad_var, platform::errors::NotFound(
+                      "Cannot get output Variable %s, variable name = %s",
+                      framework::GradVarName("X"),
+                      context.OutputName(framework::GradVarName("X"))));
   if (CanBeUsedBySelectedRows.count(context.Type())) {
     *dOut = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(
@@ -122,16 +130,18 @@ inline void ExtractActivationGradTensor(
     }
   }
-  PADDLE_ENFORCE(*dX != nullptr,
-                 "Cannot get output tensor %s, variable name = %s",
-                 framework::GradVarName("X"),
-                 context.OutputName(framework::GradVarName("X")));
+  PADDLE_ENFORCE_NOT_NULL(*dX,
+                          platform::errors::NotFound(
+                              "Cannot get the tensor from the Variable "
+                              "Output(Out), variable name = %s",
+                              context.OutputName(framework::GradVarName("X"))));
   if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
     auto x_var = context.InputVar("X");
-    PADDLE_ENFORCE(x_var != nullptr,
-                   "Cannot get input tensor X, variable name = %s",
-                   context.InputName("X"));
+    PADDLE_ENFORCE_NOT_NULL(x_var, platform::errors::NotFound(
+                                       "Cannot get the tensor from the "
+                                       "Variable Input(X), variable name = %s",
+                                       context.InputName("X")));
     if (CanBeUsedBySelectedRows.count(context.Type())) {
       *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
     } else {
@@ -1186,9 +1196,10 @@ inline void ExtractActivationDoubleGradTensor(
                                             framework::Tensor** ddOut) {
   auto ddx_var = ctx.InputVar("DDX");
   auto ddo_var = ctx.OutputVar("DDOut");
-  PADDLE_ENFORCE(ddx_var != nullptr,
-                 "Cannot get input Variable Out, variable name = %s",
-                 ctx.InputName("DDX"));
+  PADDLE_ENFORCE_NOT_NULL(
+      ddx_var, platform::errors::NotFound(
+                   "Cannot get input Variable Out, variable name = %s",
+                   ctx.InputName("DDX")));
   if (CanBeUsedBySelectedRows.count(ctx.Type())) {
     *ddX = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*ddx_var);
     if (ddo_var) {
@@ -1201,15 +1212,18 @@ inline void ExtractActivationDoubleGradTensor(
       *ddOut = ctx.Output<framework::Tensor>("DDOut");
     }
   }
-  PADDLE_ENFORCE(*ddX != nullptr,
-                 "Cannot get output tensor DDX, variable name = %s",
-                 ctx.OutputName("DDX"));
+  PADDLE_ENFORCE_NOT_NULL(
+      *ddX,
+      platform::errors::NotFound(
+          "Cannot get the tensor from the Variable Output, variable name = %s",
+          ctx.OutputName("DDX")));
   if (static_cast<int>(kDepValue) & static_cast<int>(kDepX)) {
     auto x_var = ctx.InputVar("X");
-    PADDLE_ENFORCE(x_var != nullptr,
-                   "Cannot get input Variable Out, variable name = %s",
-                   ctx.InputName("X"));
+    PADDLE_ENFORCE_NOT_NULL(
+        x_var, platform::errors::NotFound(
+                   "Cannot get input Variable Out, variable name = %s",
+                   ctx.InputName("X")));
     auto dx_var = ctx.OutputVar("DX");
     if (CanBeUsedBySelectedRows.count(ctx.Type())) {
       *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var);
@@ -1229,9 +1243,11 @@ inline void ExtractActivationDoubleGradTensor(
   }
   if (static_cast<int>(kDepValue) & static_cast<int>(kDepOut)) {
     auto out_var = ctx.InputVar("Out");
-    PADDLE_ENFORCE(out_var != nullptr,
-                   "Cannot get input tensor Out, variable name = %s",
-                   ctx.InputName("Out"));
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var,
+        platform::errors::NotFound(
+            "Cannot get the tensor from the Variable Out, variable name = %s",
+            ctx.InputName("Out")));
     auto dout_var = ctx.OutputVar("DOut");
     if (CanBeUsedBySelectedRows.count(ctx.Type())) {
       *Out =
@@ -1438,22 +1454,26 @@ inline void ExtractDoubleGradTensorWithInputDOut(
   // extract ddX(output), ddOut(input)
   auto ddx_var = ctx.InputVar("DDX");
   auto ddo_var = ctx.OutputVar("DDOut");
-  PADDLE_ENFORCE(ddx_var != nullptr,
-                 "Cannot get input Variable Out, variable name = %s",
-                 ctx.InputName("DDX"));
+  PADDLE_ENFORCE_NOT_NULL(
+      ddx_var, platform::errors::NotFound(
+                   "Cannot get input Variable Out, variable name = %s",
+                   ctx.InputName("DDX")));
   *ddX = ctx.Input<framework::Tensor>("DDX");
   if (ddo_var) {
     *ddOut = ctx.Output<framework::Tensor>("DDOut");
   }
-  PADDLE_ENFORCE(*ddX != nullptr,
-                 "Cannot get output tensor DDX, variable name = %s",
-                 ctx.OutputName("DDX"));
+  PADDLE_ENFORCE_NOT_NULL(
+      ddX,
+      platform::errors::NotFound(
+          "Cannot get the tensor from the Variable DDX, variable name = %s",
+          ctx.OutputName("DDX")));
   // extract x(input), dx(output)
   auto x_var = ctx.InputVar("X");
-  PADDLE_ENFORCE(x_var != nullptr,
-                 "Cannot get input Variable Out, variable name = %s",
-                 ctx.InputName("X"));
+  PADDLE_ENFORCE_NOT_NULL(
+      x_var, platform::errors::NotFound(
+                 "Cannot get input Variable Out, variable name = %s",
+                 ctx.InputName("X")));
   auto dx_var = ctx.OutputVar("DX");
   *X = ctx.Input<framework::Tensor>("X");
   if (dx_var) {
@@ -1531,22 +1551,25 @@ class SqrtDoubleGradKernel
     // extract ddx(input), ddout(output)
     auto ddx_var = ctx.InputVar("DDX");
     auto ddo_var = ctx.OutputVar("DDOut");
-    PADDLE_ENFORCE(ddx_var != nullptr,
-                   "Cannot get input Variable DDX, variable name = %s",
-                   ctx.InputName("DDX"));
+    PADDLE_ENFORCE_NOT_NULL(
+        ddx_var, platform::errors::NotFound(
+                     "Cannot get input Variable DDX, variable name = %s",
+                     ctx.InputName("DDX")));
     ddX = ctx.Input<framework::Tensor>("DDX");
     if (ddo_var) {
       ddOut = ctx.Output<framework::Tensor>("DDOut");
     }
-    PADDLE_ENFORCE(ddX != nullptr,
-                   "Cannot get input Variable DDX, variable name = %s",
-                   ctx.InputName("DDX"));
+    PADDLE_ENFORCE_NOT_NULL(
+        ddX, platform::errors::NotFound(
+                 "Cannot get input Variable DDX, variable name = %s",
+                 ctx.InputName("DDX")));
     // extract out(input), dout(output)
     auto out_var = ctx.InputVar("Out");
-    PADDLE_ENFORCE(out_var != nullptr,
-                   "Cannot get input Variable Out, variable name = %s",
-                   ctx.InputName("Out"));
+    PADDLE_ENFORCE_NOT_NULL(
+        out_var, platform::errors::NotFound(
+                     "Cannot get input Variable Out, variable name = %s",
+                     ctx.InputName("Out")));
     auto dout_var = ctx.OutputVar("DOut");
     Out = ctx.Input<framework::Tensor>("Out");
     if (dout_var) {
@@ -1555,9 +1578,10 @@ class SqrtDoubleGradKernel
     // extract dx(input)
     auto dx_var = ctx.InputVar("DX");
-    PADDLE_ENFORCE(dx_var != nullptr,
-                   "Cannot get input Variable DX, variable name = %s",
-                   ctx.InputName("DX"));
+    PADDLE_ENFORCE_NOT_NULL(
+        dx_var, platform::errors::NotFound(
+                    "Cannot get input Variable DX, variable name = %s",
+                    ctx.InputName("DX")));
     if (dx_var) {
       dX = ctx.Input<framework::Tensor>("DX");
     }
@@ -1608,8 +1632,11 @@ class PowKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
     }
     auto factor =
         std::vector<float>(factor_data, factor_data + factor_tensor->numel());
-    PADDLE_ENFORCE_EQ(factor.size(), 1,
-                      "The shape of factor(tensor) MUST BE [1].");
+    PADDLE_ENFORCE_EQ(
+        factor.size(), 1,
+        platform::errors::InvalidArgument(
+            "The shape of factor(tensor) must be [1] rather than %d",
+            factor.size()));
     for (auto& attr : attrs) {
       *attr.second = factor[0];
     }
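
Both PowKernel and PowGradKernel (next hunk) gain the same shape check: the factor tensor must hold exactly one element, and the failure is now reported as a typed InvalidArgument error that includes the offending size. A rough Python analogy of the check (illustrative only):

def check_factor_shape(factor):
    # Mirrors PADDLE_ENFORCE_EQ(factor.size(), 1, InvalidArgument(...)).
    if len(factor) != 1:
        raise ValueError(
            "The shape of factor(tensor) must be [1] rather than %d"
            % len(factor))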
@@ -1660,8 +1687,11 @@ class PowGradKernel
     }
     auto factor =
         std::vector<float>(factor_data, factor_data + factor_tensor->numel());
-    PADDLE_ENFORCE_EQ(factor.size(), 1,
-                      "The shape of factor(tensor) MUST BE [1].");
+    PADDLE_ENFORCE_EQ(
+        factor.size(), 1,
+        platform::errors::InvalidArgument(
+            "The shape of factor(tensor) must be [1] rather than %d",
+            factor.size()));
     for (auto& attr : attrs) {
       *attr.second = factor[0];
     }

paddle/fluid/operators/sign_op.cc

@@ -23,10 +23,9 @@ class SignOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SignOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SignOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "sign");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "sign");
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
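
Here the two hand-written null checks collapse into OP_INOUT_CHECK(pred, "Input"/"Output", var_name, op_name), the shorthand this patch uses for input/output presence checks in InferShape. At the Python level, sign's accepted dtypes are exercised by the test changes at the end of this diff; a small usage sketch (illustrative, not from the patch):

import paddle.fluid as fluid

x = fluid.layers.data(name="x", shape=[4], dtype="float16")
y = fluid.layers.sign(x)  # float16, float32 and float64 are accepted
# int32/int64 inputs now raise TypeError (see test_sign_op.py below)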

python/paddle/fluid/layers/ops.py

@@ -256,8 +256,15 @@ def generate_activation_fn(op_type):
             op = getattr(core.ops, op_type)
             return op(x)
 
-        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                                 op_type)
+        if op_type not in ["abs", "exp", "square"]:
+            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                     op_type)
+        else:
+            # abs, exp and square ops support dtype(int32, int64, float16, float32, float64)
+            check_variable_and_dtype(
+                x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
+                op_type)
 
         helper = LayerHelper(op_type, **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
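
The branch above means most activations keep the float-only whitelist, while abs, exp and square additionally admit integer inputs. A quick sketch of the resulting behavior (illustrative, not from the patch):

import paddle.fluid as fluid

x_int = fluid.layers.data(name="x_int", shape=[12, 10], dtype="int32")

y = fluid.layers.square(x_int)   # ok: abs/exp/square also take int32/int64
try:
    fluid.layers.sin(x_int)      # sin only takes float16/float32/float64
except TypeError:
    pass                         # rejected at graph-construction time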

python/paddle/fluid/layers/nn.py

@@ -8183,6 +8183,7 @@ def log(x, name=None):
     if in_dygraph_mode():
         return core.ops.log(x)
 
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
     inputs = {'X': [x]}
     helper = LayerHelper('log', **locals())
     dtype = helper.input_dtype(input_param_name='x')
@@ -8938,10 +8939,14 @@ def pow(x, factor=1.0, name=None):
             y_2 = fluid.layers.pow(x, factor=factor_tensor)
             # y_2 is x^{3.0}
     """
+    check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'],
+                             'pow')
+
     helper = LayerHelper('pow', **locals())
     inputs = {'X': x}
     attrs = {}
     if isinstance(factor, Variable):
+        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
         factor.stop_gradient = True
         inputs['FactorTensor'] = factor
     else:
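
With these checks, pow validates both operands at the Python layer: x may be int32/int64/float32/float64, while a factor passed as a Variable must be float32, matching the kernel above, which reads the factor tensor into a std::vector<float>. A valid combination looks like this (illustrative, adapted from the docstring example):

import paddle.fluid as fluid

x = fluid.layers.data(
    name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
factor = fluid.layers.fill_constant([1], "float32", 3.0)  # shape [1], float32
y = fluid.layers.pow(x, factor=factor)  # y = x^3.0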

python/paddle/fluid/tests/unittests/test_activation_op.py

@@ -765,6 +765,15 @@ class TestLog(TestActivation):
             return
         self.check_grad(['X'], 'Out')
 
+    def test_error(self):
+        in1 = fluid.layers.data(
+            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
+        in2 = fluid.layers.data(
+            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
+
+        self.assertRaises(TypeError, fluid.layers.log, in1)
+        self.assertRaises(TypeError, fluid.layers.log, in2)
+
 
 class TestSquare(TestActivation):
     def setUp(self):
@@ -856,6 +865,29 @@ class TestPow_factor_tensor(TestActivation):
         assert np.array_equal(res_3, res)
         assert np.array_equal(res_6, np.power(input, 3))
 
+    def test_error(self):
+        in1 = fluid.layers.data(
+            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32")
+        in2 = fluid.layers.data(
+            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64")
+        in3 = fluid.layers.data(
+            name="in3",
+            shape=[11, 17],
+            append_batch_size=False,
+            dtype="float32")
+        in4 = fluid.layers.data(
+            name="in4",
+            shape=[11, 17],
+            append_batch_size=False,
+            dtype="float64")
+
+        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)
+
+        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
+        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)
+
 
 class TestSTanh(TestActivation):
     def setUp(self):
@@ -1035,6 +1067,39 @@ class TestSwishOpError(unittest.TestCase):
             fluid.layers.swish(x_fp16)
 
 
+#------------------ Test Error Activation----------------------
+def create_test_error_class(op_type):
+    class TestOpErrors(unittest.TestCase):
+        def test_errors(self):
+            with program_guard(Program(), Program()):
+                op = getattr(fluid.layers, op_type)
+                # The input dtype of op_type must be float32 or float64.
+                in1 = fluid.layers.data(
+                    name='input2', shape=[12, 10], dtype="int32")
+                in2 = fluid.layers.data(
+                    name='input3', shape=[12, 10], dtype="int64")
+                self.assertRaises(TypeError, op, in1)
+                self.assertRaises(TypeError, op, in2)
+
+    cls_name = "{0}_{1}".format(op_type, "test_errors")
+    TestOpErrors.__name__ = cls_name
+    globals()[cls_name] = TestOpErrors
+
+
+create_test_error_class('acos')
+create_test_error_class('asin')
+create_test_error_class('atan')
+create_test_error_class('ceil')
+create_test_error_class('cos')
+create_test_error_class('floor')
+create_test_error_class('reciprocal')
+create_test_error_class('round')
+create_test_error_class('rsqrt')
+create_test_error_class('sin')
+create_test_error_class('sqrt')
+create_test_error_class('tanh')
+
+
 #------------------ Test Cudnn Activation----------------------
 def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
     @unittest.skipIf(not core.is_compiled_with_cuda(),
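
Each create_test_error_class call above stamps out one TestCase whose name embeds the op, so unittest discovery reports failures per op. Roughly, create_test_error_class('acos') is equivalent to defining at module scope (sketch):

class acos_test_errors(unittest.TestCase):   # cls_name = "acos_test_errors"
    def test_errors(self):
        ...  # int32/int64 inputs to fluid.layers.acos must raise TypeError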

python/paddle/fluid/tests/unittests/test_sign_op.py

@@ -45,10 +45,13 @@ class TestSignOpError(unittest.TestCase):
             # The input dtype of sign_op must be float16, float32, float64.
             input2 = fluid.layers.data(
                 name='input2', shape=[12, 10], dtype="int32")
-            self.assertRaises(TypeError, fluid.layers.sign, input2)
             input3 = fluid.layers.data(
-                name='input3', shape=[4], dtype="float16")
-            fluid.layers.sign(input3)
+                name='input3', shape=[12, 10], dtype="int64")
+            self.assertRaises(TypeError, fluid.layers.sign, input2)
+            self.assertRaises(TypeError, fluid.layers.sign, input3)
+            input4 = fluid.layers.data(
+                name='input4', shape=[4], dtype="float16")
+            fluid.layers.sign(input4)
 
 
 if __name__ == "__main__":
