diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index f8c97b05bc..aa756af8fa 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -286,7 +286,11 @@ paddle.fluid.layers.increment (ArgSpec(args=['x', 'value', 'in_place'], varargs= paddle.fluid.layers.array_write (ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)), ('document', '40b6d15f4c86b2b09df340d7778ad713')) paddle.fluid.layers.create_array (ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None), ('document', '2d4f20087080ba5105b55205ad5c5b6a')) paddle.fluid.layers.less_than (ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords=None, defaults=(None, None)), ('document', '067bbc799c66289ca8b8924c26b6673f')) +paddle.fluid.layers.less_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd6b173ae1a149e0bdfe7b8bf69285957')) +paddle.fluid.layers.greater_than (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '2c9bd414caa6c615539018d27001b44c')) +paddle.fluid.layers.greater_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '62c667d24e7b07e166b47a53b61b2ff4')) paddle.fluid.layers.equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '80c29b1dc64718f0116de90d1ac88a77')) +paddle.fluid.layers.not_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '56148fb1024687a08e96af79bdc5c929')) paddle.fluid.layers.array_read (ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None), ('document', 'dd68bead34dfbaf6b0a163fc1cc3c385')) paddle.fluid.layers.array_length (ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None), ('document', 'ffb8b9578ec66db565b223d313aa82a2')) paddle.fluid.layers.IfElse.__init__ (ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, 
defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) diff --git a/paddle/fluid/operators/affine_channel_op.cc b/paddle/fluid/operators/affine_channel_op.cc index 268a5b894a..27370a3c29 100644 --- a/paddle/fluid/operators/affine_channel_op.cc +++ b/paddle/fluid/operators/affine_channel_op.cc @@ -79,9 +79,13 @@ class AffineChannelOp : public framework::OperatorWithKernel { : x_dims[x_dims.size() - 1]); PADDLE_ENFORCE_EQ(scale_dims.size(), 1UL); - PADDLE_ENFORCE_EQ(scale_dims[0], C); PADDLE_ENFORCE_EQ(b_dims.size(), 1UL); - PADDLE_ENFORCE_EQ(b_dims[0], C); + if (ctx->IsRuntime() || scale_dims[0] > 0) { + PADDLE_ENFORCE_EQ(scale_dims[0], C); + } + if (ctx->IsRuntime() || b_dims[0] > 0) { + PADDLE_ENFORCE_EQ(b_dims[0], C); + } ctx->SetOutputDim("Out", ctx->GetInputDim("X")); ctx->ShareLoD("X", "Out"); diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 494d26f58f..0cc3e1f2b8 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -65,11 +65,22 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const { (data_layout == DataLayout::kNCHW ? 
x_dims[1] : x_dims[x_dims.size() - 1]); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], C); + auto scale_dim = ctx->GetInputDim("Scale"); + auto bias_dim = ctx->GetInputDim("Bias"); + PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL); + PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL); + + bool check = true; + if ((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 || + framework::product(bias_dim) <= 0)) { + check = false; + } + + if (check) { + PADDLE_ENFORCE_EQ(scale_dim[0], C); + PADDLE_ENFORCE_EQ(bias_dim[0], C); + } ctx->SetOutputDim("Y", x_dims); ctx->SetOutputDim("MeanOut", {C}); ctx->SetOutputDim("VarianceOut", {C}); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 619e12e6ba..e1281602bf 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -68,9 +68,14 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { std::vector output_shape({in_dims[0], filter_dims[0]}); for (size_t i = 0; i < strides.size(); ++i) { - output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], - dilations[i], paddings[i], - strides[i])); + if ((!ctx->IsRuntime()) && + (in_dims[i + 2] <= 0 || filter_dims[i + 2] <= 0)) { + output_shape.push_back(-1); + } else { + output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], + dilations[i], paddings[i], + strides[i])); + } } ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); ctx->ShareLoD("Input", "Output"); diff --git a/paddle/fluid/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc index 30ec74d844..93304ec670 100644 --- a/paddle/fluid/operators/cos_sim_op.cc +++ b/paddle/fluid/operators/cos_sim_op.cc @@ -40,17 +40,27 @@ class CosSimOp : public framework::OperatorWithKernel { auto x_dims = ctx->GetInputDim("X"); auto y_dims =
ctx->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(), - "Ranks of Input(X) and Input(Y) must be equal."); - PADDLE_ENFORCE_GE(x_dims.size(), 2, - "Rank of Input(X) must not be less than 2."); - PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()), - framework::slice_ddim(y_dims, 1, y_dims.size()), - "All dimensions except the 1st of Input(X) and Input(Y) " - "must be equal."); - PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, - "The 1st dimension of Input(Y) must be equal to Input(X) or" - " just 1 (which will be broadcasted to match Input(X))."); + bool check = true; + if ((!ctx->IsRuntime()) && + (framework::product(x_dims) <= 0 || framework::product(y_dims) <= 0)) { + check = false; + } + + if (check) { + PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(), + "Ranks of Input(X) and Input(Y) must be equal."); + PADDLE_ENFORCE_GE(x_dims.size(), 2, + "Rank of Input(X) must not be less than 2."); + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, 1, x_dims.size()), + framework::slice_ddim(y_dims, 1, y_dims.size()), + "All dimensions except the 1st of Input(X) and Input(Y) " + "must be equal."); + PADDLE_ENFORCE( + x_dims[0] == y_dims[0] || y_dims[0] == 1, + "The 1st dimension of Input(Y) must be equal to Input(X) or" + " just 1 (which will be broadcasted to match Input(X))."); + } // resize tensor ctx->SetOutputDim("Out", {x_dims[0], 1}); diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index e1d113f854..554e50725f 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -51,8 +51,10 @@ class DetectionMAPOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(label_dims.size(), 2, "The rank of Input(Label) must be 2, " "the shape is [N, 6]."); - PADDLE_ENFORCE(label_dims[1] == 6 || label_dims[1] == 5, - "The shape of Input(Label) is [N, 6] or [N, 5]."); + if (ctx->IsRuntime() || label_dims[1] > 0) { + 
PADDLE_ENFORCE(label_dims[1] == 6 || label_dims[1] == 5, + "The shape of Input(Label) is [N, 6] or [N, 5]."); + } if (ctx->HasInput("PosCount")) { PADDLE_ENFORCE(ctx->HasInput("TruePos"), diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 81aabdd006..7e9611679b 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -45,9 +45,12 @@ class RowConvOp : public framework::OperatorWithKernel { auto filter_dims = ctx->GetInputDim("Filter"); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); PADDLE_ENFORCE_EQ(filter_dims.size(), 2, "Input(Y)'s rank should be 2."); - PADDLE_ENFORCE_EQ( - x_dims[1], filter_dims[1], - "The 2nd dimension of Input(X) and Input(Filter) should be same."); + if (ctx->IsRuntime() || (x_dims[1] > 0 && filter_dims[1] > 0)) { + PADDLE_ENFORCE_EQ( + x_dims[1], filter_dims[1], + "The 2nd dimension of Input(X) and Input(Filter) should be same."); + } + ctx->SetOutputDim("Out", x_dims); ctx->ShareLoD("X", "Out"); } diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc index 8e0e3bd605..68ad223b3c 100644 --- a/paddle/fluid/operators/scatter_op.cc +++ b/paddle/fluid/operators/scatter_op.cc @@ -42,10 +42,6 @@ class ScatterOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ctx->GetInputDim("Updates")[0], ctx->GetInputDim("Ids")[0], "Updates and Ids should have same batch-size."); - framework::DDim data_dim(updates_dims); - for (int i = 1; i < data_dim.size(); ++i) { - PADDLE_ENFORCE_EQ(data_dim[i], updates_dims[i]); - } ctx->SetOutputDim("Out", ref_dims); } diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index 11e505d6df..86b4c06a27 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -99,10 +99,15 @@ class UnpoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(in_x_dims.size() == 4, "Unpooling intput must be of 
4-dimensional."); PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims); + std::vector output_shape({in_x_dims[0], in_x_dims[1]}); for (size_t i = 0; i < ksize.size(); ++i) { - output_shape.push_back(UnpoolOutputSize(in_x_dims[i + 2], ksize[i], - paddings[i], strides[i])); + if (!ctx->IsRuntime() && in_x_dims[i + 2] <= 0) { + output_shape.push_back(-1); + } else { + output_shape.push_back(UnpoolOutputSize(in_x_dims[i + 2], ksize[i], + paddings[i], strides[i])); + } } ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); } diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 7bb7134931..ccfcb13db6 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -446,7 +446,8 @@ function assert_api_spec_approvals() { BRANCH="develop" fi - API_FILES=("paddle/fluid/API.spec" + API_FILES=("CMakeLists.txt" + "paddle/fluid/API.spec" "paddle/fluid/op_use_default_grad_op_maker.spec" "python/paddle/fluid/parallel_executor.py" "paddle/fluid/framework/operator.h" @@ -469,24 +470,29 @@ function assert_api_spec_approvals() { echo "checking ${API_FILE} change, PR: ${GIT_PR_ID}, changes: ${API_CHANGE}" if [ ${API_CHANGE} ] && [ "${GIT_PR_ID}" != "" ]; then # NOTE: per_page=10000 should be ok for all cases, a PR review > 10000 is not human readable. - # approval_user_list: velconia 1979255,panyx0718 2887803,XiaoguangHu01 46782768,chengduoZH 30176695,Xreki 12538138,luotao1 6836917,sneaxiy 32832641,tensor-tang 21351065,jacquesqiao 3048612,typhoonzero 13348433,shanyi15 35982308. + # approval_user_list: velconia 1979255,XiaoguangHu01 46782768,chengduoZH 30176695,Xreki 12538138,luotao1 6836917,sneaxiy 32832641,tensor-tang 21351065,jacquesqiao 3048612,typhoonzero 13348433,shanyi15 35982308. 
if [ "$API_FILE" == "paddle/fluid/API.spec" ];then APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ - python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 2887803 35982308 46782768 30176695` + python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 35982308 46782768 30176695` if [ "${APPROVALS}" == "TRUE" ];then APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 35982308` fi + elif [ "$API_FILE" == "CMakeLists.txt" ];then + APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ + python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 6836917 46782768 30176695` else APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ - python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 2887803 1979255 21351065 3048612 13348433 46782768 30176695 12538138 6836917 32832641` + python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 1979255 21351065 3048612 13348433 46782768 30176695 12538138 6836917 32832641` fi echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}" if [ "${APPROVALS}" == "FALSE" ]; then if [ "$API_FILE" == "paddle/fluid/API.spec" ];then - echo "You must have one RD (panyx0718 or chengduoZH or XiaoguangHu01) and one PM (shanyi15) approval for the api change! ${API_FILE}" + echo "You must have one RD (chengduoZH or XiaoguangHu01) and one PM (shanyi15) approval for the api change! ${API_FILE}" + elif [ "$API_FILE" == "CMakeLists.txt" ];then + echo "You must have one RD (luotao1 or chengduoZH or XiaoguangHu01) approval for the cmakelist change! 
${API_FILE}" else - echo "You must have one RD (velconia,panyx0718,XiaoguangHu01,chengduoZH,Xreki,luotao1,sneaxiy,tensor-tang,jacquesqiao,typhoonzero) approval for the api change! ${API_FILE}" + echo "You must have one RD (velconia,XiaoguangHu01,chengduoZH,Xreki,luotao1,sneaxiy,tensor-tang,jacquesqiao,typhoonzero) approval for the api change! ${API_FILE}" fi exit 1 fi @@ -496,10 +502,10 @@ function assert_api_spec_approvals() { HAS_CONST_CAST=`git diff -U0 upstream/$BRANCH |grep -o -m 1 "const_cast" || true` if [ ${HAS_CONST_CAST} ] && [ "${GIT_PR_ID}" != "" ]; then APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews?per_page=10000 | \ - python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 2887803 1979255 21351065 3048612 13348433 46782768 30176695 12538138 6836917 32832641` + python ${PADDLE_ROOT}/tools/check_pr_approval.py 1 1979255 21351065 3048612 13348433 46782768 30176695 12538138 6836917 32832641` echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}" if [ "${APPROVALS}" == "FALSE" ]; then - echo "You must have one RD (velconia,panyx0718,XiaoguangHu01,chengduoZH,Xreki,luotao1,sneaxiy,tensor-tang,jacquesqiao,typhoonzero) approval for the api change! ${API_FILE}" + echo "You must have one RD (velconia,XiaoguangHu01,chengduoZH,Xreki,luotao1,sneaxiy,tensor-tang,jacquesqiao,typhoonzero) approval for the api change! 
${API_FILE}" exit 1 fi fi diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index f8f461853f..2df63d723e 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -29,7 +29,8 @@ from functools import reduce __all__ = [ 'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than', - 'equal', 'array_read', 'array_length', 'IfElse', 'DynamicRNN', 'StaticRNN', + 'less_equal', 'greater_than', 'greater_equal', 'equal', 'not_equal', + 'array_read', 'array_length', 'IfElse', 'DynamicRNN', 'StaticRNN', 'reorder_lod_tensor_by_rank', 'Print', 'is_empty' ] @@ -189,6 +190,7 @@ def Print(input, 'print_tensor_lod': print_tensor_lod, 'print_phase': print_phase.upper() }) + return input class BlockGuard(object): @@ -971,6 +973,114 @@ def less_than(x, y, force_cpu=None, cond=None): return cond +@templatedoc() +def less_equal(x, y, cond=None): + """ + This layer returns the truth value of :math:`x <= y` elementwise, which is equivalent to the overloaded operator `<=`. + + Args: + x(Variable): First operand of *less_equal* + y(Variable): Second operand of *less_equal* + cond(Variable|None): Optional output variable to store the result of *less_equal* + + Returns: + Variable: The tensor variable storing the output of *less_equal*. + + Examples: + .. 
code-block:: python + + out = fluid.layers.less_equal(x=label, y=limit) + """ + helper = LayerHelper("less_equal", **locals()) + if cond is None: + cond = helper.create_variable_for_type_inference(dtype='bool') + cond.stop_gradient = True + + attrs = dict() + if force_init_on_cpu(): + attrs['force_cpu'] = force_init_on_cpu() + + helper.append_op( + type='less_equal', + inputs={'X': [x], + 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs) + return cond + + +@templatedoc() +def greater_than(x, y, cond=None): + """ + This layer returns the truth value of :math:`x > y` elementwise, which is equivalent to the overloaded operator `>`. + + Args: + x(Variable): First operand of *greater_than* + y(Variable): Second operand of *greater_than* + cond(Variable|None): Optional output variable to store the result of *greater_than* + + Returns: + Variable: The tensor variable storing the output of *greater_than*. + + Examples: + .. code-block:: python + + out = fluid.layers.greater_than(x=label, y=limit) + """ + helper = LayerHelper("greater_than", **locals()) + if cond is None: + cond = helper.create_variable_for_type_inference(dtype='bool') + cond.stop_gradient = True + + attrs = dict() + if force_init_on_cpu(): + attrs['force_cpu'] = force_init_on_cpu() + + helper.append_op( + type='greater_than', + inputs={'X': [x], + 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs) + return cond + + +@templatedoc() +def greater_equal(x, y, cond=None): + """ + This layer returns the truth value of :math:`x >= y` elementwise, which is equivalent to the overloaded operator `>=`. + + Args: + x(Variable): First operand of *greater_equal* + y(Variable): Second operand of *greater_equal* + cond(Variable|None): Optional output variable to store the result of *greater_equal* + + Returns: + Variable: The tensor variable storing the output of *greater_equal*. + + Examples: + .. 
code-block:: python + + out = fluid.layers.greater_equal(x=label, y=limit) + """ + helper = LayerHelper("greater_equal", **locals()) + if cond is None: + cond = helper.create_variable_for_type_inference(dtype='bool') + cond.stop_gradient = True + + attrs = dict() + if force_init_on_cpu(): + attrs['force_cpu'] = force_init_on_cpu() + + helper.append_op( + type='greater_equal', + inputs={'X': [x], + 'Y': [y]}, + outputs={'Out': [cond]}, + attrs=attrs) + return cond + + def equal(x, y, cond=None): """ This layer returns the truth value of :math:`x == y` elementwise. @@ -999,6 +1109,34 @@ def equal(x, y, cond=None): return cond +def not_equal(x, y, cond=None): + """ + This layer returns the truth value of :math:`x != y` elementwise, which is equivalent to the overloader operator `!=`. + + Args: + x(Variable): First operand of *not_equal* + y(Variable): Second operand of *not_equal* + cond(Variable|None): Optional output variable to store the result of *not_equal* + + Returns: + Variable: The tensor variable storing the output of *not_equal*. + + Examples: + .. code-block:: python + + out = fluid.layers.not_equal(x=label, y=limit) + """ + helper = LayerHelper("not_equal", **locals()) + if cond is None: + cond = helper.create_variable_for_type_inference(dtype='bool') + cond.stop_gradient = True + + helper.append_op( + type='not_equal', inputs={'X': [x], + 'Y': [y]}, outputs={'Out': [cond]}) + return cond + + def array_read(array, i): """ This function performs the operation to read the data in as an