diff --git a/paddle/fluid/operators/pad2d_op.cc b/paddle/fluid/operators/pad2d_op.cc
index a706d05fd7..a9da21f479 100644
--- a/paddle/fluid/operators/pad2d_op.cc
+++ b/paddle/fluid/operators/pad2d_op.cc
@@ -319,20 +319,46 @@ void Pad2DGradEdgeNHWC(T* d_in_data, const int num, const int channels,
   }
 }
 
+static inline void GetPaddings(int* paddings,
+                               const framework::ExecutionContext& context) {
+  auto* paddings_t = context.Input<Tensor>("Paddings");
+  if (paddings_t) {
+    auto paddings_data = paddings_t->data<int>();
+    paddings[0] = paddings_data[0];
+    paddings[1] = paddings_data[1];
+    paddings[2] = paddings_data[2];
+    paddings[3] = paddings_data[3];
+  } else {
+    auto pads = context.Attr<std::vector<int>>("paddings");
+    std::copy(pads.begin(), pads.end(), paddings);
+  }
+}
+
 template <typename T>
 class Pad2dCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     T value = context.Attr<T>("pad_value");
+
     auto* x = context.Input<Tensor>("X");
-    auto* out = context.Output<Tensor>("Out");
     auto in_dims = x->dims();
-    auto out_dims = out->dims();
     const T* in_data = x->data<T>();
+
+    auto* out = context.Output<Tensor>("Out");
+    if (data_format == "NCHW") {
+      out->Resize({in_dims[0], in_dims[1], in_dims[2] + pads[0] + pads[1],
+                   in_dims[3] + pads[2] + pads[3]});
+    } else {
+      out->Resize({in_dims[0], in_dims[1] + pads[0] + pads[1],
+                   in_dims[2] + pads[2] + pads[3], in_dims[3]});
+    }
+    auto out_dims = out->dims();
     T* out_data = out->mutable_data<T>(context.GetPlace());
+
     const int pad_top = pads[0];
     const int pad_left = pads[2];
     const int num = in_dims[0];
@@ -376,7 +402,8 @@ template <typename T>
 class Pad2dGradCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
@@ -442,21 +469,35 @@ class Pad2dOp : public framework::OperatorWithKernel {
                    "Output(Out) of Pad2dOp should not be null.");
 
     auto x_dim = ctx->GetInputDim("X");
-    auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(x_dim.size(), 4,
-                      "Size of paddings should be equal to 4.");
-    std::vector<int> out_dims(x_dim.size());
+                      "The size of input(X)'s dimension should be equal to 4.");
+    std::vector<int64_t> out_dims(x_dim.size());
     auto data_format = ctx->Attrs().Get<std::string>("data_format");
     out_dims[0] = x_dim[0];
-    if (data_format == "NCHW") {
+    if (ctx->HasInput("Paddings")) {
+      auto paddings_dim = ctx->GetInputDim("Paddings");
+      PADDLE_ENFORCE_EQ(
+          paddings_dim.size(), 1,
+          "Size of Input(Paddings)'s dimension should be equal to 1.");
+      PADDLE_ENFORCE_EQ(paddings_dim[0], 4,
+                        "Shape of Input(Paddings) should be equal to [4].");
       out_dims[1] = x_dim[1];
-      out_dims[2] = x_dim[2] + paddings[0] + paddings[1];  // height
-      out_dims[3] = x_dim[3] + paddings[2] + paddings[3];  // width
-    } else {  // NHWC
+      out_dims[2] = x_dim[2];
       out_dims[3] = x_dim[3];
-      out_dims[1] = x_dim[1] + paddings[0] + paddings[1];
-      out_dims[2] = x_dim[2] + paddings[2] + paddings[3];
+    } else {
+      auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
+      PADDLE_ENFORCE_EQ(paddings.size(), 4,
+                        "Size of paddings should be equal to 4.");
+      if (data_format == "NCHW") {
+        out_dims[1] = x_dim[1];
+        out_dims[2] = x_dim[2] + paddings[0] + paddings[1];  // height
+        out_dims[3] = x_dim[3] + paddings[2] + paddings[3];  // width
+      } else {  // NHWC
+        out_dims[3] = x_dim[3];
+        out_dims[1] = x_dim[1] + paddings[0] + paddings[1];
+        out_dims[2] = x_dim[2] + paddings[2] + paddings[3];
+      }
     }
 
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -466,6 +507,13 @@ class Pad2dOp : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", /*->*/ "Out");
     }
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -477,6 +525,12 @@ class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out",
               "The output of pad2d op. "
              "A tensor with the same shape as X.");
+    AddInput("Paddings",
+             "A 1-D tensor to describe the padding rules. "
+             "paddings=[0, 1, 2, 3] means "
+             "padding 0 rows to the top, 1 row to the bottom, 2 columns to "
+             "the left and 3 columns to the right. Size of paddings must be 4.")
+        .AsDispensable();
     AddAttr<std::vector<int>>(
         "paddings",
         "(vector<int>) "
@@ -554,6 +608,13 @@ class Pad2dOpGrad : public framework::OperatorWithKernel {
       ctx->SetOutputDim(x_grad_name, x_dims);
     }
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 class Pad2dOpGradMaker : public framework::SingleGradOpDescMaker {
@@ -564,6 +625,7 @@ class Pad2dOpGradMaker : public framework::SingleGradOpDescMaker {
   std::unique_ptr<framework::OpDesc> Apply() const override {
     auto* bind = new framework::OpDesc();
     bind->SetInput("X", Input("X"));
+    bind->SetInput("Paddings", Input("Paddings"));
     bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
     bind->SetOutput(framework::GradVarName("X"), InputGrad("X"));
     bind->SetAttrMap(Attrs());
diff --git a/paddle/fluid/operators/pad2d_op.cu b/paddle/fluid/operators/pad2d_op.cu
index 9ba0ddbd84..72eca08b06 100644
--- a/paddle/fluid/operators/pad2d_op.cu
+++ b/paddle/fluid/operators/pad2d_op.cu
@@ -287,20 +287,50 @@ __global__ void Pad2DGradEdgeNHWC(const int out_size, T* d_in_data,
   }
 }
 
+static inline void GetPaddings(int* paddings,
+                               const framework::ExecutionContext& context) {
+  auto* paddings_t = context.Input<Tensor>("Paddings");
+  if (paddings_t) {
+    Tensor pads;
+    framework::TensorCopySync(*paddings_t, platform::CPUPlace(), &pads);
+    auto pads_data = pads.data<int>();
+    paddings[0] = pads_data[0];
+    paddings[1] = pads_data[1];
+    paddings[2] = pads_data[2];
+    paddings[3] = pads_data[3];
+  } else {
+    auto pads = context.Attr<std::vector<int>>("paddings");
+    std::copy(pads.begin(), pads.end(), paddings);
+  }
+}
+
 template <typename T>
 class Pad2dCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     T value = context.Attr<T>("pad_value");
+
     auto* x = context.Input<Tensor>("X");
-    auto* out = context.Output<Tensor>("Out");
     auto in_dims = x->dims();
-    auto out_dims = out->dims();
     const T* in_data = x->data<T>();
-    T* out_data = out->mutable_data<T>(context.GetPlace());
+    auto* out = context.Output<Tensor>("Out");
+    auto out_dims = out->dims();
+    if (data_format == "NCHW") {
+      out_dims[0] = in_dims[0];
+      out_dims[1] = in_dims[1];
+      out_dims[2] = in_dims[2] + pads[0] + pads[1];
+      out_dims[3] = in_dims[3] + pads[2] + pads[3];
+    } else {
+      out_dims[0] = in_dims[0];
+      out_dims[1] = in_dims[1] + pads[0] + pads[1];
+      out_dims[2] = in_dims[2] + pads[2] + pads[3];
+      out_dims[3] = in_dims[3];
+    }
+    T* out_data = out->mutable_data<T>(out_dims, context.GetPlace());
     const int pad_top = pads[0];
     const int pad_left = pads[2];
     const int num = in_dims[0];
@@ -356,7 +386,8 @@ template <typename T>
 class Pad2dGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 0403765121..472daff1ea 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6924,7 +6924,7 @@ def pad2d(input,
     Args:
         input (Variable): The input image with [N, C, H, W] format or [N, H, W, C] format.
-        paddings (tuple|list): The padding size. If padding is a tuple, it must
+        paddings (tuple|list|Variable): The padding size. If padding is a tuple, it must
            contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
            Default: padding = [0, 0, 0, 0].
        mode (str): Three modes: constant(default), reflect, edge. Default: constant
@@ -6949,16 +6949,17 @@ def pad2d(input,
     helper = LayerHelper('pad2d', **locals())
     dtype = helper.input_dtype(input_param_name='input')
     out = helper.create_variable_for_type_inference(dtype)
+    inputs = {'X': input}
+    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
+
+    if isinstance(paddings, Variable):
+        inputs['Paddings'] = paddings
+        attrs['paddings'] = []
+    else:
+        attrs['paddings'] = paddings
+
     helper.append_op(
-        type='pad2d',
-        inputs={'X': input},
-        outputs={"Out": out},
-        attrs={
-            'paddings': paddings,
-            'mode': mode,
-            'pad_value': pad_value,
-            'data_frmat': data_format
-        })
+        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
 
     return out
 
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 2004c91793..7f477031dd 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -636,13 +636,21 @@ class TestBook(unittest.TestCase):
         with program_guard(program):
             input = layers.data(
                 name="input", shape=[3, 100, 100], dtype="float32")
+            paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
             out = layers.pad2d(
                 input,
                 paddings=[1, 2, 3, 4],
                 mode='reflect',
                 data_format='NCHW',
                 name="shape")
+            out_1 = layers.pad2d(
+                input,
+                paddings=paddings,
+                mode='reflect',
+                data_format='NCHW',
+                name="shape")
             self.assertIsNotNone(out)
+            self.assertIsNotNone(out_1)
         print(str(program))
 
     def test_prelu(self):
diff --git a/python/paddle/fluid/tests/unittests/test_pad2d_op.py b/python/paddle/fluid/tests/unittests/test_pad2d_op.py
index 728b8c181a..5c4a6ca59e 100644
--- a/python/paddle/fluid/tests/unittests/test_pad2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pad2d_op.py
@@ -20,11 +20,17 @@ from op_test import OpTest
 class TestPad2dOp(OpTest):
     def setUp(self):
         self.pad_value = 0.0
+        self.variable_paddings = False
         self.initTestCase()
         self.op_type = "pad2d"
         self.inputs = {'X': np.random.random(self.shape).astype("float32"), }
         self.attrs = {}
-        self.attrs['paddings'] = np.array(self.paddings).flatten()
+        if self.variable_paddings:
+            self.attrs['paddings'] = []
+            self.inputs['Paddings'] = np.array(self.paddings).flatten().astype(
+                "int32")
+        else:
+            self.attrs['paddings'] = np.array(self.paddings).flatten()
         self.attrs['pad_value'] = self.pad_value
         self.attrs['mode'] = self.mode
         self.attrs['data_format'] = self.data_format
@@ -98,5 +104,24 @@ class TestCase5(TestPad2dOp):
         self.data_format = "NHWC"
 
 
+class TestCase6(TestPad2dOp):
+    def initTestCase(self):
+        self.shape = (2, 4, 4, 2)
+        self.paddings = [0, 1, 2, 3]
+        self.mode = "constant"
+        self.pad_value = 1.2
+        self.data_format = "NHWC"
+        self.variable_paddings = True
+
+
+class TestCase7(TestPad2dOp):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 4)
+        self.paddings = [0, 1, 2, 3]
+        self.mode = "reflect"
+        self.data_format = "NCHW"
+        self.variable_paddings = True
+
+
 if __name__ == '__main__':
     unittest.main()
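
Usage sketch (not part of the patch): after this change, `paddings` may be a Python list/tuple, which is baked into the `paddings` attribute at graph-build time, or a 1-D int32 tensor of shape [4] in [top, bottom, left, right] order, which is wired to the new dispensable `Paddings` input and read at run time. A minimal example mirroring the new case in `test_layers.py`; the variable names below are illustrative only:

```python
import paddle.fluid.layers as layers

# NCHW input image: 3 channels, 100x100.
input = layers.data(name="input", shape=[3, 100, 100], dtype="float32")

# Static paddings: the list is stored in the op's "paddings" attribute.
out_attr = layers.pad2d(
    input, paddings=[1, 2, 3, 4], mode='reflect', data_format='NCHW')

# Variable paddings: a 1-D int32 tensor of shape [4] is passed through the
# dispensable "Paddings" input, so padding sizes can be decided at run time.
pad_tensor = layers.fill_constant(shape=[4], dtype='int32', value=1)
out_tensor = layers.pad2d(
    input, paddings=pad_tensor, mode='reflect', data_format='NCHW')
```

Note the design consequence visible in the diff: when `Paddings` arrives as a tensor, `InferShape` cannot know the padded height/width at compile time, so it leaves the spatial dimensions of `Out` equal to those of `X`, and the CPU/GPU kernels resize the output from the actual padding values when they run.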