parent
495649af57
commit
8b5431d5e1
@ -0,0 +1,185 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "PadOp.h"
|
||||
#include "paddle/math/Vector.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
template <>
|
||||
void Pad<DEVICE_TYPE_CPU>(real* outputs,
|
||||
const real* inputs,
|
||||
const int num,
|
||||
const int inC,
|
||||
const int inH,
|
||||
const int inW,
|
||||
const int padc0,
|
||||
const int padc1,
|
||||
const int padh0,
|
||||
const int padh1,
|
||||
const int padw0,
|
||||
const int padw1) {
|
||||
int outC = inC + padc0 + padc1;
|
||||
int outH = inH + padh0 + padh1;
|
||||
int outW = inW + padw0 + padw1;
|
||||
for (int i = 0; i < num; i++) {
|
||||
for (int c = 0; c < inC; c++) {
|
||||
for (int h = 0; h < inH; h++) {
|
||||
int inoff = ((i * inC + c) * inH + h) * inW;
|
||||
int outoff = ((i * outC + c + padc0) * outH + h + padh0) * outW + padw0;
|
||||
memcpy(outputs + outoff, inputs + inoff, inW * sizeof(real));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Backward pass of zero-padding on CPU: for every input-sized row,
// accumulate the matching slice of the (larger, padded) output gradient
// into the input gradient.
template <>
void PadGrad<DEVICE_TYPE_CPU>(real* inGrad,
                              const real* outGrad,
                              const int num,
                              const int inC,
                              const int inH,
                              const int inW,
                              const int padc0,
                              const int padc1,
                              const int padh0,
                              const int padh1,
                              const int padw0,
                              const int padw1) {
  // Dimensions of the padded (output-side) gradient tensor.
  int outC = inC + padc0 + padc1;
  int outH = inH + padh0 + padh1;
  int outW = inW + padw0 + padw1;
  for (int i = 0; i < num; i++) {
    for (int c = 0; c < inC; c++) {
      for (int h = 0; h < inH; h++) {
        // Flat offsets of row h: inoff in the input-sized tensor, outoff at
        // the corresponding shifted position inside the padded tensor.
        int inoff = ((i * inC + c) * inH + h) * inW;
        int outoff = ((i * outC + c + padc0) * outH + h + padh0) * outW + padw0;
        // Wrap the two rows as length-inW vectors and accumulate:
        // inGrad += outGrad (note: +=, not assignment).
        CpuVector inG = CpuVector(inW, inGrad + inoff);
        CpuVector outG = CpuVector(inW, const_cast<real*>(outGrad + outoff));
        inG += outG;
      }
    }
  }
}
|
||||
|
||||
/**
 * \brief Forward padding function wrapper dispatched by device type.
 *
 * \param inputs[0] input value, NCHW.
 * \param outputs[0] output value, NCHW, enlarged by the configured pads.
 */
template <DeviceType Device>
class PadFunc : public FunctionBase {
public:
  // Reads the six pad amounts (before/after along C, H, W) from the
  // function configuration.
  void init(const FuncConfig& config) override {
    padc0_ = config.get<int>("padc0");
    padc1_ = config.get<int>("padc1");
    padh0_ = config.get<int>("padh0");
    padh1_ = config.get<int>("padh1");
    padw0_ = config.get<int>("padw0");
    padw1_ = config.get<int>("padw1");
  }

  // Expects exactly one input tensor, one output tensor and no inouts;
  // forwards to the device-specific Pad kernel.
  void calc(const Arguments& inputs,
            const Arguments& outputs,
            const Arguments& inouts) override {
    CHECK_EQ(1, inputs.size());
    CHECK_EQ(1, outputs.size());
    CHECK_EQ(0, inouts.size());

    // Input layout is NCHW: batch, channels, height, width.
    size_t num = inputs[0].dims_[0];
    size_t inC = inputs[0].dims_[1];
    size_t inH = inputs[0].dims_[2];
    size_t inW = inputs[0].dims_[3];

    Pad<Device>(outputs[0].getData(),
                inputs[0].getData(),
                num,
                inC,
                inH,
                inW,
                padc0_,
                padc1_,
                padh0_,
                padh1_,
                padw0_,
                padw1_);
  }

private:
  // Pad amounts: *0 = before the data, *1 = after, per dimension.
  int padc0_;
  int padc1_;
  int padh0_;
  int padh1_;
  int padw0_;
  int padw1_;
};
|
||||
|
||||
/**
 * \brief Backward padding function wrapper dispatched by device type.
 *
 * \param inputs[0] output-side gradient (read only).
 * \param inouts[0] input-side gradient, accumulated in place.
 */
template <DeviceType Device>
class PadGradFunc : public FunctionBase {
public:
  // Reads the six pad amounts (before/after along C, H, W) from the
  // function configuration; must match the forward PadFunc config.
  void init(const FuncConfig& config) override {
    padc0_ = config.get<int>("padc0");
    padc1_ = config.get<int>("padc1");
    padh0_ = config.get<int>("padh0");
    padh1_ = config.get<int>("padh1");
    padw0_ = config.get<int>("padw0");
    padw1_ = config.get<int>("padw1");
  }

  // Expects one input (the padded output gradient), no outputs, and one
  // inout (the input gradient to accumulate into).
  void calc(const Arguments& inputs,
            const Arguments& outputs,
            const Arguments& inouts) override {
    CHECK_EQ(1, inputs.size());
    CHECK_EQ(0, outputs.size());
    CHECK_EQ(1, inouts.size());

    // Dimensions come from the *unpadded* (input-side) gradient tensor.
    size_t n = inouts[0].dims_[0];
    size_t inC = inouts[0].dims_[1];
    size_t inH = inouts[0].dims_[2];
    size_t inW = inouts[0].dims_[3];

    // First argument is the gradient being accumulated (inGrad),
    // second is the padded output gradient (outGrad).
    PadGrad<Device>(inouts[0].getData(),
                    inputs[0].getData(),
                    n,
                    inC,
                    inH,
                    inW,
                    padc0_,
                    padc1_,
                    padh0_,
                    padh1_,
                    padw0_,
                    padw1_);
  }

private:
  // Pad amounts: *0 = before the data, *1 = after, per dimension.
  int padc0_;
  int padc1_;
  int padh0_;
  int padh1_;
  int padw0_;
  int padw1_;
};
|
||||
|
||||
// Register CPU implementations of the padding forward/backward functions;
// GPU variants are registered only when the build includes CUDA support.
REGISTER_TYPED_FUNC(Pad, CPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, CPU, PadGradFunc);
#ifndef PADDLE_ONLY_CPU
REGISTER_TYPED_FUNC(Pad, GPU, PadFunc);
REGISTER_TYPED_FUNC(PadGrad, GPU, PadGradFunc);
#endif
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,96 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Function.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
/**
 * \brief This funtion pads zeros to inputs according to the specify dimension.
 *        The data structure of image data is NCHW.
 *
 * \param[out] outputs save results.
 * \param[in] inputs input data.
 * \param[in] num batch size of input data.
 * \param[in] inC channel number of input data.
 * \param[in] inH height of input data.
 * \param[in] inW width of input data.
 * \param[in] padc0 how many values to add before the data in dimension of
 *            channel.
 * \param[in] padc1 how many values to add after the data in dimension of
 *            channel.
 * \param[in] padh0 how many values to add before the data in dimension of
 *            height.
 * \param[in] padh1 how many values to add after the data in dimension of
 *            height.
 * \param[in] padw0 how many values to add before the data in dimension of
 *            width.
 * \param[in] padw1 how many values to add after the data in dimension of
 *            width.
 *
 */
template <DeviceType Device>
void Pad(real* outputs,
         const real* inputs,
         const int num,
         const int inC,
         const int inH,
         const int inW,
         const int padc0,
         const int padc1,
         const int padh0,
         const int padh1,
         const int padw0,
         const int padw1);
|
||||
|
||||
/**
 * \brief Padding operation backward.
 *        The data structure of image data is NCHW.
 *
 * \param[out] inGrad gradients of previous layer.
 * \param[in] outGrad output gradients.
 * \param[in] num batch size of input data.
 * \param[in] inC channel number of input data.
 * \param[in] inH height of input data.
 * \param[in] inW width of input data.
 * \param[in] padc0 how many values to add before the data in dimension of
 *            channel.
 * \param[in] padc1 how many values to add after the data in dimension of
 *            channel.
 * \param[in] padh0 how many values to add before the data in dimension of
 *            height.
 * \param[in] padh1 how many values to add after the data in dimension of
 *            height.
 * \param[in] padw0 how many values to add before the data in dimension of
 *            width.
 * \param[in] padw1 how many values to add after the data in dimension of
 *            width.
 *
 */
template <DeviceType Device>
void PadGrad(real* inGrad,
             const real* outGrad,
             const int num,
             const int inC,
             const int inH,
             const int inW,
             const int padc0,
             const int padc1,
             const int padh0,
             const int padh1,
             const int padw0,
             const int padw1);
|
||||
} // namespace paddle
|
@ -0,0 +1,102 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "hl_base.h"
|
||||
#include "PadOp.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
// Forward padding kernel: one thread per input element
// (nthreads == num * inC * inH * inW, see the launcher below).
__global__ void KePad(real* outputs, const real* inputs,
                      int inC, int inH, int inW,
                      int padc, int padh, int padw,
                      int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    // Decompose the flat index into NCHW coordinates of the input.
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;

    // Matching location inside the padded output tensor; only the leading
    // pads (padc/padh/padw) shift the offset.
    const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
    outputs[off] = inputs[idx];
  }
}
|
||||
|
||||
// GPU forward padding: launches one thread per input element.  The pad
// region of `outputs` is never written here -- presumably the caller
// pre-zeroes the output buffer (TODO confirm).
template <>
void Pad<DEVICE_TYPE_GPU>(real* outputs,
                          const real* inputs,
                          const int num,
                          const int inC,
                          const int inH,
                          const int inW,
                          const int padc0,
                          const int padc1,
                          const int padh0,
                          const int padh1,
                          const int padw0,
                          const int padw1) {
  // Total number of input elements; one thread each.
  size_t nth = num * inC * inH * inW;
  int blockSize = 1024;
  int gridSize = (nth + 1024 - 1) / 1024;  // ceil(nth / blockSize)
  int outC = inC + padc0 + padc1;
  int outH = inH + padh0 + padh1;
  int outW = inW + padw0 + padw1;
  // Only the leading pads are needed for the offset computation.
  KePad<<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
    (outputs, inputs, inC, inH, inW, padc0, padh0, padw0,
     outC, outH, outW, nth);
  CHECK_SYNC("Pad");
}
|
||||
|
||||
// Backward padding kernel: one thread per *input* gradient element;
// accumulates (+=) the matching element of the padded output gradient.
__global__ void KePadDiff(real* inGrad, const real* outGrad,
                          int inC, int inH, int inW,
                          int padc, int padh, int padw,
                          int outC, int outH, int outW, int nthreads) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < nthreads) {
    // Decompose the flat index into NCHW coordinates of the input grad.
    const int w = idx % inW;
    const int h = (idx / inW) % inH;
    const int c = (idx / inW / inH) % inC;
    const int n = idx / inW / inH / inC;

    // Matching location inside the padded output-gradient tensor.
    const int off = ((n * outC + c + padc) * outH + h + padh) * outW + padw + w;
    inGrad[idx] += outGrad[off];
  }
}
|
||||
|
||||
// GPU backward padding: launches one thread per input-gradient element;
// each thread accumulates from the padded output gradient.
template <>
void PadGrad<DEVICE_TYPE_GPU>(real* inGrad,
                              const real* outGrad,
                              const int num,
                              const int inC,
                              const int inH,
                              const int inW,
                              const int padc0,
                              const int padc1,
                              const int padh0,
                              const int padh1,
                              const int padw0,
                              const int padw1) {
  // Total number of input-gradient elements; one thread each.
  int nth = num * inC * inH * inW;
  int blockSize = 1024;
  int gridSize = (nth + 1024 - 1) / 1024;  // ceil(nth / blockSize)
  int outC = inC + padc0 + padc1;
  int outH = inH + padh0 + padh1;
  int outW = inW + padw0 + padw1;
  // Only the leading pads are needed for the offset computation.
  KePadDiff <<<gridSize, blockSize, 0, STREAM_DEFAULT>>>
    (inGrad, outGrad, inC, inH, inW, padc0, padh0, padw0,
     outC, outH, outW, nth);
  CHECK_SYNC("PadGrad");
}
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,70 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "FunctionTest.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
// Compares the CPU and GPU "Pad" function implementations over a grid of
// tensor shapes.  Output dims grow by the total pad per dimension:
// channels + (2+3), height + (1+2), width + (3+2).
TEST(Pad, real) {
  for (size_t numSamples : {5, 32}) {
    for (size_t channels : {1, 5, 32}) {
      for (size_t imgSizeH : {5, 33, 100}) {
        for (size_t imgSizeW : {5, 32, 96}) {
          VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
                  << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;

          FunctionCompare compare("Pad",
                                  FuncConfig()
                                      .set("padc0", 2)
                                      .set("padc1", 3)
                                      .set("padh0", 1)
                                      .set("padh1", 2)
                                      .set("padw0", 3)
                                      .set("padw1", 2));
          Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
          // Out dims = in dims + sum of the two pads in each dimension.
          Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
          // Null data pointers: FunctionCompare allocates and fills the
          // tensors itself -- presumably with random values (TODO confirm).
          compare.cmpWithArg(
              {Tensor(nullptr, inDims)}, {Tensor(nullptr, outDims)}, {});
        }
      }
    }
  }
}
|
||||
|
||||
// TEST(PadGrad, real) {
|
||||
// for (size_t numSamples : {5, 32}) {
|
||||
// for (size_t channels : {1, 5, 32}) {
|
||||
// for (size_t imgSizeH : {5, 33, 100}) {
|
||||
// for (size_t imgSizeW : {5, 32, 96}) {
|
||||
// VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
|
||||
// << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW;
|
||||
//
|
||||
// FunctionCompare compare("PadGrad",
|
||||
// FuncConfig()
|
||||
// .set("padc0", 2).set("padc1", 3)
|
||||
// .set("padh0", 1).set("padh1", 2)
|
||||
// .set("padw0", 3).set("padw1", 2));
|
||||
// Dims inDims{numSamples, channels, imgSizeH, imgSizeW};
|
||||
// Dims outDims{numSamples, channels + 5, imgSizeH + 3, imgSizeW + 5};
|
||||
// compare.cmpWithArg({Tensor(nullptr, inDims)},
|
||||
// {Tensor(nullptr, outDims)},
|
||||
// {});
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,115 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "PadLayer.h"
|
||||
#include "paddle/utils/Stat.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
REGISTER_LAYER(pad, PadLayer);
|
||||
|
||||
// Initializes the pad layer from its configuration: reads the input image
// shape and the six pad amounts, then creates the forward/backward
// padding functions.  Always returns true.
bool PadLayer::init(const LayerMap& layerMap,
                    const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);

  auto& pad_conf = config_.inputs(0).pad_conf();
  auto& img_conf = pad_conf.image_conf();
  CHECK_EQ(config_.inputs_size(), 1);
  // inDims_ is NCHW; batch size (index 0) is a placeholder filled in
  // later by setTensorDim().
  inDims_.push_back(0);
  inDims_.push_back(img_conf.channels());
  // Height falls back to img_size when no explicit img_size_y is given
  // (i.e. square images).
  inDims_.push_back(img_conf.has_img_size_y() ? img_conf.img_size_y()
                                              : img_conf.img_size());
  inDims_.push_back(img_conf.img_size());

  // Each pad spec must carry exactly two values: before and after.
  CHECK_EQ(2UL, pad_conf.pad_c_size());
  CHECK_EQ(2UL, pad_conf.pad_h_size());
  CHECK_EQ(2UL, pad_conf.pad_w_size());
  padc_.push_back(pad_conf.pad_c(0));
  padc_.push_back(pad_conf.pad_c(1));
  padh_.push_back(pad_conf.pad_h(0));
  padh_.push_back(pad_conf.pad_h(1));
  padw_.push_back(pad_conf.pad_w(0));
  padw_.push_back(pad_conf.pad_w(1));

  outDims_.resize(4);
  setOutDims(0);

  // Forward and backward functions share the same pad configuration.
  createFunction(forward_,
                 "Pad",
                 FuncConfig()
                     .set("padc0", padc_[0])
                     .set("padc1", padc_[1])
                     .set("padh0", padh_[0])
                     .set("padh1", padh_[1])
                     .set("padw0", padw_[0])
                     .set("padw1", padw_[1]));
  createFunction(backward_,
                 "PadGrad",
                 FuncConfig()
                     .set("padc0", padc_[0])
                     .set("padc1", padc_[1])
                     .set("padh0", padh_[0])
                     .set("padh1", padh_[1])
                     .set("padw0", padw_[0])
                     .set("padw1", padw_[1]));

  return true;
}
|
||||
|
||||
void PadLayer::setOutDims(int batchSize) {
|
||||
outDims_[0] = batchSize;
|
||||
outDims_[1] = inDims_[1] + padc_[0] + padc_[1];
|
||||
outDims_[2] = inDims_[2] + padh_[0] + padh_[1];
|
||||
outDims_[3] = inDims_[3] + padw_[0] + padw_[1];
|
||||
}
|
||||
|
||||
// Refreshes inDims_ from the current mini-batch: batch size plus, when the
// input layer reports a non-zero frame height/width, the dynamic H and W.
// Then recomputes the output dims.
void PadLayer::setTensorDim(int batchSize) {
  CHECK_EQ(inputLayers_.size(), 1UL);
  inDims_[0] = batchSize;
  int h = inputLayers_[0]->getOutput().getFrameHeight();
  // BUGFIX: the original statements `if (h != 0) inDims_[2];` and
  // `if (w != 0) inDims_[3];` were no-op expression statements -- the
  // frame height/width were read and then discarded, so dynamic frame
  // sizes never reached inDims_.  Assign them as clearly intended.
  if (h != 0) inDims_[2] = h;
  int w = inputLayers_[0]->getOutput().getFrameWidth();
  if (w != 0) inDims_[3] = w;
  setOutDims(batchSize);
}
|
||||
|
||||
// Forward pass: sizes the output matrix for the padded shape, then runs
// the "Pad" function from the input value into the output value.
void PadLayer::forward(PassType passType) {
  Layer::forward(passType);
  MatrixPtr input = inputLayers_[0]->getOutputValue();
  size_t batchSize = input->getHeight();
  // Refresh in/out dims from this batch before allocating the output.
  setTensorDim(batchSize);
  // Per-sample output size = C * H * W of the padded tensor.
  int size = outDims_[1] * outDims_[2] * outDims_[3];
  resetOutput(batchSize, size);
  MatrixPtr outV = getOutputValue();
  REGISTER_TIMER_INFO("PadForward", getName().c_str());
  forward_[0]->calc({Tensor(input->getData(), inDims_)},
                    {Tensor(outV->getData(), outDims_)},
                    {});
}
|
||||
|
||||
// Backward pass: accumulates the output gradient (padded shape) into the
// previous layer's gradient (unpadded shape) via the "PadGrad" function.
void PadLayer::backward(const UpdateCallback& callback) {
  (void)callback;

  MatrixPtr preGrad = inputLayers_[0]->getOutputGrad();
  // Nothing to do when the previous layer does not need gradients.
  if (NULL == preGrad) {
    return;
  }
  MatrixPtr outGrad = getOutputGrad();
  REGISTER_TIMER_INFO("PadBackward", getName().c_str());
  // PadGradFunc convention: inputs[0] = output grad (read),
  // inouts[0] = input grad (accumulated in place).
  backward_[0]->calc({Tensor(outGrad->getData(), outDims_)},
                     {},
                     {Tensor(preGrad->getData(), inDims_)});
}
|
||||
} // namespace paddle
|
@ -0,0 +1,45 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Layer.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
/**
 * @brief A layer that pads zeros around its input image along the
 *        channel, height and width dimensions (NCHW layout).  The pad
 *        amounts before/after each dimension come from the layer's
 *        pad_conf configuration.
 */
class PadLayer : public Layer {
public:
  explicit PadLayer(const LayerConfig& config) : Layer(config) {}

  ~PadLayer() {}

  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
  void forward(PassType passType);
  void backward(const UpdateCallback& callback = nullptr);

protected:
  // Recomputes outDims_ from inDims_ and the pad amounts.
  void setOutDims(int batchSize);
  // Refreshes inDims_ (batch size, dynamic frame size) then outDims_.
  void setTensorDim(int batchSize);

  // Pad amounts per dimension: element 0 = before, element 1 = after.
  std::vector<int> padc_;
  std::vector<int> padh_;
  std::vector<int> padw_;
  // NCHW shapes of the input and (padded) output tensors.
  Dims inDims_;
  Dims outDims_;
};
|
||||
} // namespace paddle
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,21 @@
|
||||
# Test network configuration exercising pad_layer:
# data -> conv -> max-pool -> pad.
from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

# NOTE(review): size=2304 does not match height*width = 48*42 = 2016
# (2304 would be 48*48) -- confirm these dimensions are intentional.
data = data_layer(name='data', size=2304, height=48, width=42)

conv = img_conv_layer(
    input=data,
    filter_size=3,
    num_channels=1,
    num_filters=16,
    padding=1,
    act=LinearActivation(),
    bias_attr=True)

# NOTE(review): num_channels=8 here while the conv above produces 16
# feature maps -- verify this mismatch is intentional for the test.
pool = img_pool_layer(
    input=conv, num_channels=8, pool_size=2, stride=2, pool_type=MaxPooling())

# Pad 2/3 channels before/after, 1/2 rows, and 3/1 columns.
pad = pad_layer(input=pool, pad_c=[2, 3], pad_h=[1, 2], pad_w=[3, 1])

outputs(pad)
|
Loading…
Reference in new issue