commit
842d25be9d
@ -0,0 +1,223 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "PadOp.h"
|
||||
#include "paddle/math/Vector.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
template <>
|
||||
void Pad<DEVICE_TYPE_CPU>(real* outputs,
|
||||
const real* inputs,
|
||||
const int num,
|
||||
const int inC,
|
||||
const int inH,
|
||||
const int inW,
|
||||
const PadConf& pad) {
|
||||
int cstart = pad.channelStart, cend = pad.channelEnd;
|
||||
int hstart = pad.heightStart, hend = pad.heightEnd;
|
||||
int wstart = pad.widthStart, wend = pad.widthEnd;
|
||||
int outC = inC + cstart + cend;
|
||||
int outH = inH + hstart + hend;
|
||||
int outW = inW + wstart + wend;
|
||||
for (int i = 0; i < num; i++) {
|
||||
for (int c = 0; c < inC; c++) {
|
||||
for (int h = 0; h < inH; h++) {
|
||||
int inoff = ((i * inC + c) * inH + h) * inW;
|
||||
int outoff =
|
||||
((i * outC + c + cstart) * outH + h + hstart) * outW + wstart;
|
||||
memcpy(outputs + outoff, inputs + inoff, inW * sizeof(real));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <>
|
||||
void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
|
||||
const real* outGrad,
|
||||
const int num,
|
||||
const int inC,
|
||||
const int inH,
|
||||
const int inW,
|
||||
const PadConf& pad) {
|
||||
int cstart = pad.channelStart, cend = pad.channelEnd;
|
||||
int hstart = pad.heightStart, hend = pad.heightEnd;
|
||||
int wstart = pad.widthStart, wend = pad.widthEnd;
|
||||
int outC = inC + cstart + cend;
|
||||
int outH = inH + hstart + hend;
|
||||
int outW = inW + wstart + wend;
|
||||
for (int i = 0; i < num; i++) {
|
||||
for (int c = 0; c < inC; c++) {
|
||||
for (int h = 0; h < inH; h++) {
|
||||
int inoff = ((i * inC + c) * inH + h) * inW;
|
||||
int outoff =
|
||||
((i * outC + c + cstart) * outH + h + hstart) * outW + wstart;
|
||||
CpuVector inG = CpuVector(inW, inGrad + inoff);
|
||||
CpuVector outG = CpuVector(inW, const_cast<real*>(outGrad + outoff));
|
||||
inG += outG;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
 * \brief Padding zeros to the input according to the specified dimensions.
|
||||
* The struct pad_ contains the padding size in each dimension.
|
||||
* The input and output is a 4D tensor. In PadFunc, we only
|
||||
* pad zeros to the 2nd to 4th dimension.
|
||||
*
|
||||
* Argument in this Function:
|
||||
* \param pad_ A struct object contains the padding size in each dimension.
|
||||
* It has six integers. The channelStart and channelEnd indicate
|
||||
* how many zeros to add before and after the input in channel
|
||||
* dimension. And the heightStart and heightEnd indicate padding
|
||||
* in height dimension. The widthStart and widthEnd indicate the
|
||||
* padding in width dimension.
|
||||
* \param inputs A 4D tensor, only one input.
|
||||
* \param outputs A 4D tensor, the output value after padding.
|
||||
*
|
||||
* For example,
|
||||
* Input(2,2,2,3) = [
|
||||
* [ [[1,2,3], [3,4,5]],
|
||||
* [[2,3,5], [1,6,7]] ],
|
||||
* [ [[4,3,1], [1,8,7]],
|
||||
* [[3,8,9], [2,3,5]] ]
|
||||
 * ] # the shape is (2,2,2,3)
|
||||
*
|
||||
* pad_: if channelStart = channelEnd = 1, others are 0.
|
||||
* Output(2,4,2,3) = [
|
||||
* [ [[0,0,0], [0,0,0]],
|
||||
* [[1,2,3], [3,4,5]],
|
||||
* [[2,3,5], [1,6,7]],
|
||||
* [[0,0,0], [0,0,0]] ],
|
||||
* [ [[0,0,0], [0,0,0]],
|
||||
* [[4,3,1], [1,8,7]],
|
||||
* [[3,8,9], [2,3,5]],
|
||||
* [[0,0,0], [0,0,0]] ]
|
||||
* ] # the shape is (2,4,2,3)
|
||||
*
|
||||
* pad_: if widthStart = 1, widthEnd = 2, others are 0.
|
||||
* Output(2,2,2,6) = [
|
||||
* [ [[0,1,2,3,0,0], [0,3,4,5,0,0]],
|
||||
* [[0,2,3,5,0,0], [0,1,6,7,0,0]] ],
|
||||
* [ [[0,4,3,1,0,0], [0,1,8,7,0,0]],
|
||||
* [[0,3,8,9,0,0], [0,2,3,5,0,0]] ],
|
||||
* ] # the shape is (2,2,2,6)
|
||||
*
|
||||
* pad_: if heightStart = 1, heightEnd = 1, others are 0.
|
||||
* Output(2,2,4,3) = [
|
||||
* [ [[0,0,0], [1,2,3], [3,4,5], [0,0,0]],
|
||||
* [[0,0,0], [2,3,5], [1,6,7], [0,0,0]] ],
|
||||
* [ [[0,0,0], [4,3,1], [1,8,7], [0,0,0]],
|
||||
* [[0,0,0], [3,8,9], [2,3,5], [0,0,0]] ],
|
||||
* ] # the shape is (2,2,4,3)
|
||||
*/
|
||||
|
||||
template <DeviceType Device>
|
||||
class PadFunc : public FunctionBase {
|
||||
public:
|
||||
void init(const FuncConfig& config) override {
|
||||
pad_.channelStart = config.get<int>("cstart");
|
||||
pad_.channelEnd = config.get<int>("cend");
|
||||
pad_.heightStart = config.get<int>("hstart");
|
||||
pad_.heightEnd = config.get<int>("hend");
|
||||
pad_.widthStart = config.get<int>("wstart");
|
||||
pad_.widthEnd = config.get<int>("wend");
|
||||
}
|
||||
|
||||
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
|
||||
CHECK_EQ(1UL, inputs.size());
|
||||
CHECK_EQ(1UL, outputs.size());
|
||||
CHECK_EQ(outputs[0].getArgType(), ASSIGN_TO);
|
||||
|
||||
size_t num = inputs[0].shape()[0];
|
||||
size_t inC = inputs[0].shape()[1];
|
||||
size_t inH = inputs[0].shape()[2];
|
||||
size_t inW = inputs[0].shape()[3];
|
||||
typename Tensor<real, Device>::Vector vec(outputs[0].shape().getElements(),
|
||||
outputs[0].data<real>());
|
||||
vec.zero();
|
||||
|
||||
Pad<Device>(outputs[0].data<real>(),
|
||||
inputs[0].data<real>(),
|
||||
num,
|
||||
inC,
|
||||
inH,
|
||||
inW,
|
||||
pad_);
|
||||
}
|
||||
|
||||
private:
|
||||
PadConf pad_;
|
||||
};
|
||||
|
||||
/**
|
||||
* \brief The backward propagation of padding Function. Remove the elements
|
||||
* in the padding positions of forward.
|
||||
*
|
||||
* Argument in this Function:
|
||||
* \param pad_ The same meaning as it in PadFunc.
|
||||
* \param inputs The gradient with respect to the output value of PadFunc.
|
||||
* \param outputs The gradient with respect to the input value of PadFunc.
|
||||
*/
|
||||
|
||||
template <DeviceType Device>
|
||||
class PadGradFunc : public FunctionBase {
|
||||
public:
|
||||
void init(const FuncConfig& config) override {
|
||||
pad_.channelStart = config.get<int>("cstart");
|
||||
pad_.channelEnd = config.get<int>("cend");
|
||||
pad_.heightStart = config.get<int>("hstart");
|
||||
pad_.heightEnd = config.get<int>("hend");
|
||||
pad_.widthStart = config.get<int>("wstart");
|
||||
pad_.widthEnd = config.get<int>("wend");
|
||||
}
|
||||
|
||||
void calc(const BufferArgs& inputs, const BufferArgs& outputs) override {
|
||||
CHECK_EQ(1UL, inputs.size());
|
||||
CHECK_EQ(1UL, outputs.size());
|
||||
|
||||
size_t num = outputs[0].shape()[0];
|
||||
size_t inC = outputs[0].shape()[1];
|
||||
size_t inH = outputs[0].shape()[2];
|
||||
size_t inW = outputs[0].shape()[3];
|
||||
|
||||
if (outputs[0].getArgType() != ADD_TO) {
|
||||
// for unit test
|
||||
typename Tensor<real, Device>::Vector tmp(
|
||||
outputs[0].shape().getElements(), outputs[0].data<real>());
|
||||
tmp.zero();
|
||||
}
|
||||
|
||||
PadGrad<Device>(outputs[0].data<real>(),
|
||||
inputs[0].data<real>(),
|
||||
num,
|
||||
inC,
|
||||
inH,
|
||||
inW,
|
||||
pad_);
|
||||
}
|
||||
|
||||
private:
|
||||
PadConf pad_;
|
||||
};
|
||||
|
||||
// Register the CPU (and, when the build has GPU support, GPU)
// implementations under the names "Pad" / "PadGrad" used by
// createFunction() and the FunctionCompare tests.
REGISTER_TYPED_FUNC(Pad, CPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(Pad, GPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc);
#endif
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,79 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Function.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
/// Zero-padding sizes for a 4D (num, channel, height, width) tensor:
/// one (start, end) pair per padded dimension.
struct PadConf {
  /// how many values to add before the data along channel dimension.
  int channelStart;
  /// how many values to add after the data along channel dimension.
  int channelEnd;
  /// how many values to add before the data along height dimension.
  int heightStart;
  /// how many values to add after the data along height dimension.
  int heightEnd;
  /// how many values to add before the data along width dimension.
  int widthStart;
  /// how many values to add after the data along width dimension.
  int widthEnd;
};
|
||||
|
||||
/**
 * \brief This function pads zeros to inputs according to the specified
 *        dimensions. The input and output are 4D tensors. Zeros are
 *        padded from the 2nd to the 4th dimension according to the
 *        argument pad.
 *
 * \param[out] outputs save results.
 * \param[in]  inputs  input data.
 * \param[in]  num     batch size of input data.
 * \param[in]  inC     channel number of input data.
 * \param[in]  inH     height of input data.
 * \param[in]  inW     width of input data.
 * \param[in]  pad     the padding config, contains the size along the
 *                     specified dimensions.
 */
template <DeviceType Device>
void Pad(real* outputs,
         const real* inputs,
         const int num,
         const int inC,
         const int inH,
         const int inW,
         const PadConf& pad);
|
||||
|
||||
/**
 * \brief Padding operation backward: accumulates the output gradient back
 *        into the input gradient, skipping the padded border.
 *
 * \param[out] inGrad  gradients of previous layer.
 * \param[in]  outGrad output gradients.
 * \param[in]  num     batch size of input data.
 * \param[in]  inC     channel number of input data.
 * \param[in]  inH     height of input data.
 * \param[in]  inW     width of input data.
 * \param[in]  pad     the padding config, contains the size along the
 *                     specified dimensions.
 */
template <DeviceType Device>
void PadGrad(real* inGrad,
             const real* outGrad,
             const int num,
             const int inC,
             const int inH,
             const int inW,
             const PadConf& pad);
|
||||
} // namespace paddle
|
@ -0,0 +1,98 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "hl_base.h"
|
||||
#include "PadOp.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
// Forward padding kernel: one thread per INPUT element. Each thread
// decomposes its flat index into (n, c, h, w) coordinates of the NCHW
// input and writes the value into the corresponding shifted position of
// the output tensor (which the caller has already zero-filled).
__global__ void KePad(real* outputs, const real* inputs,
                      int inC, int inH, int inW,
                      int padc, int padh, int padw,
                      int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    // Unflatten idx into input coordinates (innermost dimension first).
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;

    // Destination index in the padded output tensor.
    const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
    outputs[off] = inputs[idx];
  }
}
|
||||
|
||||
template <>
|
||||
void Pad<DEVICE_TYPE_GPU>(real* outputs,
|
||||
const real* inputs,
|
||||
const int num,
|
||||
const int inC,
|
||||
const int inH,
|
||||
const int inW,
|
||||
const PadConf& pad) {
|
||||
size_t nth = num * inC * inH * inW;
|
||||
int blockSize = 1024;
|
||||
int gridSize = (nth + 1024 - 1) / 1024;
|
||||
int cstart = pad.channelStart, cend = pad.channelEnd;
|
||||
int hstart = pad.heightStart, hend = pad.heightEnd;
|
||||
int wstart = pad.widthStart, wend = pad.widthEnd;
|
||||
int outC = inC + cstart + cend;
|
||||
int outH = inH + hstart + hend;
|
||||
int outW = inW + wstart + wend;
|
||||
KePad<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
|
||||
(outputs, inputs, inC, inH, inW, cstart, hstart, wstart,
|
||||
outC, outH, outW, nth);
|
||||
CHECK_SYNC("Pad");
|
||||
}
|
||||
|
||||
// Backward padding kernel: one thread per INPUT-GRADIENT element. Each
// thread decomposes its flat index into (n, c, h, w) coordinates of the
// NCHW input gradient and accumulates the gradient from the matching
// shifted position inside the padded output gradient.
__global__ void KePadDiff(real* inGrad, const real* outGrad,
                          int inC, int inH, int inW,
                          int padc, int padh, int padw,
                          int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    // Unflatten idx into input coordinates (innermost dimension first).
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;

    // Source index in the padded output-gradient tensor.
    const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
    inGrad[idx] += outGrad[off];
  }
}
|
||||
|
||||
template <>
|
||||
void PadGrad<DEVICE_TYPE_GPU>(real* inGrad,
|
||||
const real* outGrad,
|
||||
const int num,
|
||||
const int inC,
|
||||
const int inH,
|
||||
const int inW,
|
||||
const PadConf& pad) {
|
||||
int nth = num * inC * inH * inW;
|
||||
int blockSize = 1024;
|
||||
int gridSize = (nth + 1024 - 1) / 1024;
|
||||
int cstart = pad.channelStart, cend = pad.channelEnd;
|
||||
int hstart = pad.heightStart, hend = pad.heightEnd;
|
||||
int wstart = pad.widthStart, wend = pad.widthEnd;
|
||||
int outC = inC + cstart + cend;
|
||||
int outH = inH + hstart + hend;
|
||||
int outW = inW + wstart + wend;
|
||||
KePadDiff <<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
|
||||
(inGrad, outGrad, inC, inH, inW, cstart, hstart, wstart,
|
||||
outC, outH, outW, nth);
|
||||
CHECK_SYNC("PadGrad");
|
||||
}
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,75 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "FunctionTest.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
// Cross-check the registered "Pad" implementations over a grid of shapes.
// Pad sizes: channels +(2+3), height +(1+2), width +(3+2).
TEST(Pad, real) {
  for (size_t batch : {5, 32}) {
    for (size_t chan : {1, 5, 32}) {
      for (size_t imgH : {5, 33, 100}) {
        for (size_t imgW : {5, 32, 96}) {
          VLOG(3) << " numSamples=" << batch << " channels=" << chan
                  << " imgSizeH=" << imgH << " imgSizeW=" << imgW;

          FunctionCompare compare("Pad",
                                  FuncConfig()
                                      .set("cstart", 2)
                                      .set("cend", 3)
                                      .set("hstart", 1)
                                      .set("hend", 2)
                                      .set("wstart", 3)
                                      .set("wend", 2));
          TensorShape inDims{batch, chan, imgH, imgW};
          TensorShape outDims{batch, chan + 5, imgH + 3, imgW + 5};
          compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, inDims));
          compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, outDims, ASSIGN_TO));
          compare.run();
        }
      }
    }
  }
}
|
||||
|
||||
// Cross-check the registered "PadGrad" implementations: the padded shape
// is the input here and the unpadded shape the output.
TEST(PadGrad, real) {
  for (size_t batch : {5, 32}) {
    for (size_t chan : {1, 5, 32}) {
      for (size_t imgH : {5, 33, 100}) {
        for (size_t imgW : {5, 32, 96}) {
          VLOG(3) << " numSamples=" << batch << " channels=" << chan
                  << " imgSizeH=" << imgH << " imgSizeW=" << imgW;
          FunctionCompare compare("PadGrad",
                                  FuncConfig()
                                      .set("cstart", 2)
                                      .set("cend", 3)
                                      .set("hstart", 1)
                                      .set("hend", 2)
                                      .set("wstart", 3)
                                      .set("wend", 2));
          TensorShape inDims{batch, chan, imgH, imgW};
          TensorShape outDims{batch, chan + 5, imgH + 3, imgW + 5};
          compare.addInputs(BufferArg(VALUE_TYPE_FLOAT, outDims));
          compare.addOutputs(BufferArg(VALUE_TYPE_FLOAT, inDims, ASSIGN_TO));
          compare.run();
        }
      }
    }
  }
}
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,115 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "PadLayer.h"
|
||||
#include "paddle/utils/Stat.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
REGISTER_LAYER(pad, PadLayer);
|
||||
|
||||
bool PadLayer::init(const LayerMap& layerMap,
|
||||
const ParameterMap& parameterMap) {
|
||||
/* Initialize the basic parent class */
|
||||
Layer::init(layerMap, parameterMap);
|
||||
|
||||
auto& pad_conf = config_.inputs(0).pad_conf();
|
||||
auto& img_conf = pad_conf.image_conf();
|
||||
CHECK_EQ(config_.inputs_size(), 1);
|
||||
inDims_ = TensorShape(
|
||||
{0,
|
||||
img_conf.channels(),
|
||||
img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size(),
|
||||
img_conf.img_size()});
|
||||
|
||||
CHECK_EQ(2, pad_conf.pad_c_size());
|
||||
CHECK_EQ(2, pad_conf.pad_h_size());
|
||||
CHECK_EQ(2, pad_conf.pad_w_size());
|
||||
padc_.push_back(pad_conf.pad_c(0));
|
||||
padc_.push_back(pad_conf.pad_c(1));
|
||||
padh_.push_back(pad_conf.pad_h(0));
|
||||
padh_.push_back(pad_conf.pad_h(1));
|
||||
padw_.push_back(pad_conf.pad_w(0));
|
||||
padw_.push_back(pad_conf.pad_w(1));
|
||||
|
||||
outDims_ = TensorShape(4);
|
||||
setOutDims(0);
|
||||
|
||||
createFunction(forward_,
|
||||
"Pad",
|
||||
FuncConfig()
|
||||
.set("cstart", padc_[0])
|
||||
.set("cend", padc_[1])
|
||||
.set("hstart", padh_[0])
|
||||
.set("hend", padh_[1])
|
||||
.set("wstart", padw_[0])
|
||||
.set("wend", padw_[1]));
|
||||
createFunction(backward_,
|
||||
"PadGrad",
|
||||
FuncConfig()
|
||||
.set("cstart", padc_[0])
|
||||
.set("cend", padc_[1])
|
||||
.set("hstart", padh_[0])
|
||||
.set("hend", padh_[1])
|
||||
.set("wstart", padw_[0])
|
||||
.set("wend", padw_[1]));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Recompute the output shape: the input shape grown by the pad sizes on
// each side of the channel, height and width dimensions.
void PadLayer::setOutDims(const size_t batchSize) {
  const size_t outChannels = inDims_[1] + padc_[0] + padc_[1];
  const size_t outHeight = inDims_[2] + padh_[0] + padh_[1];
  const size_t outWidth = inDims_[3] + padw_[0] + padw_[1];
  outDims_.reshape({batchSize, outChannels, outHeight, outWidth});
}
|
||||
|
||||
// Refresh inDims_ from the current batch, then propagate to outDims_.
void PadLayer::setTensorDim(const size_t batchSize) {
  CHECK_EQ(static_cast<int>(inputLayers_.size()), 1);
  inDims_.setDim(0, batchSize);
  // Frame height/width may vary per batch; a value of 0 means "not set",
  // in which case the configured sizes from init() are kept.
  const int frameH = inputLayers_[0]->getOutput().getFrameHeight();
  if (frameH != 0) inDims_.setDim(2, frameH);
  const int frameW = inputLayers_[0]->getOutput().getFrameWidth();
  if (frameW != 0) inDims_.setDim(3, frameW);
  setOutDims(batchSize);
}
|
||||
|
||||
void PadLayer::forward(PassType passType) {
  Layer::forward(passType);
  MatrixPtr input = inputLayers_[0]->getOutputValue();
  size_t batchSize = input->getHeight();
  // Refresh the tensor shapes for the current batch before resizing.
  setTensorDim(batchSize);
  int size = outDims_[1] * outDims_[2] * outDims_[3];
  resetOutput(batchSize, size);
  MatrixPtr outV = getOutputValue();
  REGISTER_TIMER_INFO("PadForward", getName().c_str());

  BufferArgs inputs;
  BufferArgs outputs;
  inputs.addArg(*getInputValue(0), inDims_);
  // Reuse the matrix fetched above (the original fetched it and then
  // called getOutputValue() a second time, leaving outV unused).
  outputs.addArg(*outV, outDims_, ASSIGN_TO);
  forward_[0]->calc(inputs, outputs);
}
|
||||
|
||||
void PadLayer::backward(const UpdateCallback& callback) {
|
||||
(void)callback;
|
||||
REGISTER_TIMER_INFO("PadBackward", getName().c_str());
|
||||
|
||||
BufferArgs inputs;
|
||||
BufferArgs outputs;
|
||||
inputs.addArg(*getOutputGrad(), outDims_);
|
||||
outputs.addArg(*getInputGrad(0), inDims_, ADD_TO);
|
||||
backward_[0]->calc(inputs, outputs);
|
||||
}
|
||||
} // namespace paddle
|
@ -0,0 +1,46 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Layer.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
/**
|
||||
* \brief This layer pads zeros to inputs according to the specify dimension.
|
||||
* The input and output is a 4D tensor. Padding zeros from the 2nd to
|
||||
 * the 4th dimension according to padc_, padh_ and padw_.
|
||||
*/
|
||||
class PadLayer : public Layer {
|
||||
public:
|
||||
explicit PadLayer(const LayerConfig& config) : Layer(config) {}
|
||||
|
||||
~PadLayer() {}
|
||||
|
||||
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
|
||||
void forward(PassType passType);
|
||||
void backward(const UpdateCallback& callback = nullptr);
|
||||
|
||||
protected:
|
||||
void setOutDims(const size_t batchSize);
|
||||
void setTensorDim(const size_t batchSize);
|
||||
|
||||
std::vector<int> padc_;
|
||||
std::vector<int> padh_;
|
||||
std::vector<int> padw_;
|
||||
TensorShape inDims_;
|
||||
TensorShape outDims_;
|
||||
};
|
||||
} // namespace paddle
|
@ -0,0 +1,21 @@
|
||||
# Trainer-config test network exercising pad_layer: data -> conv -> pool -> pad.
from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

# NOTE(review): size=2304 does not equal channels*height*width
# (1 * 48 * 42 = 2016) -- confirm whether size or width is the intended value.
data = data_layer(name='data', size=2304, height=48, width=42)

conv = img_conv_layer(
    input=data,
    filter_size=3,
    num_channels=1,
    num_filters=16,
    padding=1,
    act=LinearActivation(),
    bias_attr=True)

# NOTE(review): num_channels=8 disagrees with the conv layer's
# num_filters=16 above -- verify the intended input channel count.
pool = img_pool_layer(
    input=conv, num_channels=8, pool_size=2, stride=2, pool_type=MaxPooling())

# Pad 2/3 zeros around channels, 1/2 around height, 3/1 around width.
pad = pad_layer(input=pool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])

outputs(pad)
|
Loading…
Reference in new issue