Add ImageExpandGrad Function.

cblas_new
hedaoyuan 9 years ago
parent 61aa1098fd
commit 2acb84fe70
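This commit adds ImageExpandGrad, the backward counterpart of the ImageExpand function, and wires it into BlockExpandLayer: init() now registers a backward function alongside the forward one, forward() caches the input/output TensorShapes in new member variables, and the CPU branch of backward() dispatches through backward_[0]->calc(), while the GPU path keeps the existing hand-written expansion.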

@@ -44,6 +44,7 @@ enum ColFormat { kCFO = 0, kOCF = 1 };
  * input_channels,
  * filter_height,
  * filter_width]
+ * TODO(hedaoyuan): Refactor the arguments of the interface with TensorShape.
  */
 template <ColFormat Format, DeviceType Device, class T>
 class Im2ColFunctor {
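The TODO added above proposes collapsing the functor's scalar shape arguments into TensorShape objects. The current argument list is not visible in this hunk, so the following is only a hypothetical sketch of what such a refactor could look like:

// Hypothetical post-refactor interface (illustrative only; the real
// Im2ColFunctor signature is not part of this diff):
template <ColFormat Format, DeviceType Device, class T>
class Im2ColFunctor {
public:
  void operator()(const T* imData,
                  const TensorShape& imShape,   // [input_channels, input_height, input_width]
                  T* colData,
                  const TensorShape& colShape,  // layout depends on Format (kCFO / kOCF)
                  int strideHeight,
                  int strideWidth,
                  int paddingHeight,
                  int paddingWidth);
};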

File diff suppressed because it is too large.

@@ -47,6 +47,12 @@ bool BlockExpandLayer::init(const LayerMap& layerMap,
                       .set("strides", strides)
                       .set("paddings", paddings)
                       .set("blocks", blocks));
+    createFunction(backward_,
+                   "ImageExpandGrad",
+                   FuncConfig()
+                       .set("strides", strides)
+                       .set("paddings", paddings)
+                       .set("blocks", blocks));
   }
   return true;
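Both functions are registered with an identical FuncConfig, because ImageExpandGrad has to undo the expansion with exactly the same geometry (strides, paddings, blocks). For reference, the block grid that this geometry induces follows standard im2col arithmetic; a sketch in C++ (variable names assumed, not taken from this diff):

// Number of block positions along each image axis, using
// ceil-division so partially covered borders still get a block:
int outputH = 1 + (imgSizeH + 2 * paddingH - blockH + strideH - 1) / strideH;
int outputW = 1 + (imgSizeW + 2 * paddingW - blockW + strideW - 1) / strideW;
int blockNum  = outputH * outputW;           // blocks per sample
int blockSize = blockH * blockW * channels;  // elements per block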
@@ -126,12 +132,12 @@ void BlockExpandLayer::forward(PassType passType) {
   }
   start[batchSize] = batchSize * blockNum;
   if (!useGpu_) {
-    TensorShape inputShape({batchSize, channels_, imgSizeH_, imgSizeW_});
-    TensorShape outputShape({batchSize, blockNum, blockSize});
+    inputShape_ = TensorShape({batchSize, channels_, imgSizeH_, imgSizeW_});
+    outputShape_ = TensorShape({batchSize, blockNum, blockSize});
     BufferArgs inputs;
     BufferArgs outputs;
-    inputs.addArg(*getInputValue(0), inputShape);
-    outputs.addArg(*getOutputValue(), outputShape, ASSIGN_TO);
+    inputs.addArg(*getInputValue(0), inputShape_);
+    outputs.addArg(*getOutputValue(), outputShape_, ASSIGN_TO);
     forward_[0]->calc(inputs, outputs);
   }
 }
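Hoisting the two shapes from locals into the inputShape_/outputShape_ members (declared in the header below) records the geometry of the current batch, so backward() can describe its BufferArgs with the same shapes instead of recomputing them. Since batchSize may differ between batches, forward() refreshes both shapes on every pass.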
@@ -144,6 +150,8 @@ void BlockExpandLayer::backward(const UpdateCallback& callback) {
   if (!preGrad) {
     return;
   }
+  if (useGpu_) {
   MatrixPtr grad = getOutputGrad();
   MatrixPtr gradTrans = Matrix::create(blockSize, blockNum, false, useGpu_);
   size_t batchSize = preGrad->getHeight();
@@ -180,6 +188,13 @@ void BlockExpandLayer::backward(const UpdateCallback& callback) {
                      1.0,
                      1.0);
     }
+  } else {
+    BufferArgs inputs;
+    BufferArgs outputs;
+    inputs.addArg(*getOutputGrad(), outputShape_);
+    outputs.addArg(*getInputGrad(0), inputShape_, ADD_TO);
+    backward_[0]->calc(inputs, outputs);
+  }
 }
 
 }  // namespace paddle
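Note the ArgType asymmetry between the two passes: forward() tags its output buffer ASSIGN_TO, while backward() tags the input gradient ADD_TO, since the input's gradient buffer may already hold contributions from other consumers of the same input. A minimal comment sketch of the assumed semantics:

// Assumed ArgType semantics (illustrative, not code from this diff):
//   ASSIGN_TO : out  = f(in)   // forward overwrites the output value
//   ADD_TO    : out += f(in)   // backward accumulates into the gradient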

@@ -53,6 +53,9 @@ protected:
   /// auxiliary variable, which saves the transposed output value.
   MatrixPtr outVTrans_;
+
+  TensorShape inputShape_;
+  TensorShape outputShape_;
 
 public:
   explicit BlockExpandLayer(const LayerConfig& config) : Layer(config) {}
