Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into crop_op
commit
e2d75bd364
@ -1,124 +0,0 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include "ExpandConvBaseLayer.h"
|
||||
|
||||
#include "paddle/utils/Logging.h"
|
||||
namespace paddle {
|
||||
|
||||
bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
|
||||
const ParameterMap ¶meterMap) {
|
||||
/* Initialize the basic convolutional parent class */
|
||||
ConvBaseLayer::init(layerMap, parameterMap);
|
||||
|
||||
int index = 0;
|
||||
for (auto &inputConfig : config_.inputs()) {
|
||||
const ConvConfig &conf = inputConfig.conv_conf();
|
||||
/* Consistent caffe mode for multiple input */
|
||||
caffeMode_ = conf.caffe_mode();
|
||||
|
||||
// create a new weight
|
||||
size_t height, width;
|
||||
height = filterPixels_[index] * filterChannels_[index];
|
||||
width = (!isDeconv_) ? numFilters_ : channels_[index];
|
||||
CHECK_EQ(parameters_[index]->getSize(), width * height);
|
||||
Weight *w = new Weight(height, width, parameters_[index]);
|
||||
weights_.emplace_back(w);
|
||||
index++;
|
||||
}
|
||||
if (biasParameter_.get()) {
|
||||
if (sharedBiases_) {
|
||||
CHECK_EQ((size_t)numFilters_, biasParameter_->getSize());
|
||||
biases_ =
|
||||
std::unique_ptr<Weight>(new Weight(numFilters_, 1, biasParameter_));
|
||||
} else {
|
||||
biases_ =
|
||||
std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_));
|
||||
}
|
||||
}
|
||||
getOutputSize();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t ExpandConvBaseLayer::getOutputSize() {
  // At least one input layer must be connected before the output
  // dimensions can be derived.
  CHECK_NE(inputLayers_.size(), 0UL);
  // Delegate the actual size computation to the base class.
  return ConvBaseLayer::calOutputSize();
}
|
||||
|
||||
void ExpandConvBaseLayer::addSharedBias() {
|
||||
size_t mapW = getOutputSize() / numFilters_;
|
||||
size_t mapH = getOutputValue()->getElementCnt() / mapW;
|
||||
MatrixPtr out =
|
||||
Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_);
|
||||
|
||||
Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_);
|
||||
|
||||
out->transpose(transOutValue_, false); // false means no memory allocation
|
||||
transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
|
||||
numFilters_);
|
||||
|
||||
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
|
||||
1,
|
||||
biases_->getW()->getElementCnt(),
|
||||
false,
|
||||
useGpu_);
|
||||
transOutValue_->addBias(*bias, 1.0f);
|
||||
|
||||
transOutValue_->reshape(mapW, mapH);
|
||||
transOutValue_->transpose(out, false); // false means no memory allocation
|
||||
|
||||
out->clear();
|
||||
bias->clear();
|
||||
}
|
||||
|
||||
void ExpandConvBaseLayer::addUnsharedBias() {
|
||||
MatrixPtr outValue = getOutputValue();
|
||||
MatrixPtr bias = Matrix::create(biases_->getW()->getData(),
|
||||
1,
|
||||
biases_->getW()->getElementCnt(),
|
||||
false,
|
||||
useGpu_);
|
||||
outValue->addBias(*bias, 1.0f);
|
||||
}
|
||||
|
||||
void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) {
  // Non-owning view of the incoming output gradient.
  const size_t mapWidth = getOutputSize() / numFilters_;
  const size_t mapHeight = v->getElementCnt() / mapWidth;
  MatrixPtr gradView =
      Matrix::create(v->getData(), mapHeight, mapWidth, false, useGpu_);

  Matrix::resizeOrCreate(transOutValue_, mapWidth, mapHeight, false, useGpu_);

  gradView->transpose(transOutValue_, false);  // false means no memory allocation
  // After this reshape each column holds the gradients of one filter, so
  // the column-wise reduction below yields the per-filter bias gradient.
  transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_,
                          numFilters_);
  biases->collectBias(*transOutValue_, 1.0f);
}
|
||||
|
||||
void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) {
  // Flat row view over the bias gradient storage.
  MatrixPtr biasGrad = Matrix::create(biases_->getWGrad()->getData(),
                                      1,
                                      biases_->getWGrad()->getElementCnt(),
                                      false,
                                      useGpu_);
  if (sharedBiases_) {
    // Shared case needs a transpose/reshape pass; delegate to the helper.
    bpropSharedBias(biasGrad, v);
  } else {
    biasGrad->collectBias(*v, 1.0f);
  }
  biasGrad->clear();
}
|
||||
|
||||
} // namespace paddle
|
@ -1,57 +0,0 @@
|
||||
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include "ConvBaseLayer.h"
|
||||
#include "paddle/math/Matrix.h"
|
||||
|
||||
namespace paddle {
|
||||
|
||||
/**
|
||||
* @brief A subclass of ConvBaseLayer that is a superclass of both
|
||||
* ExpandConvLayer and ExpandConvTransLayer
|
||||
*/
|
||||
class ExpandConvBaseLayer : public ConvBaseLayer {
protected:
  /// The transpose of output, which is an auxiliary matrix.
  /// (Re)used by addSharedBias / bpropSharedBias as scratch space.
  MatrixPtr transOutValue_;

public:
  explicit ExpandConvBaseLayer(const LayerConfig& config)
      : ConvBaseLayer(config) {}

  ~ExpandConvBaseLayer() {}

  /// Create the filter weights (one per input) and the bias from the
  /// layer configuration; always returns true.
  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  /// Number of elements in one output sample; delegates to
  /// ConvBaseLayer::calOutputSize().
  size_t getOutputSize();

  /**
   * Add shared bias.
   */
  void addSharedBias();

  /**
   * Add unshared bias.
   */
  void addUnsharedBias();

  /// Accumulate the shared-bias gradient of output gradient `v` into
  /// `biases` (one value per filter).
  void bpropSharedBias(MatrixPtr biases, MatrixPtr v);
  /// Accumulate the bias gradient of `v`, dispatching on sharedBiases_.
  void bpropBiases(MatrixPtr v);
};
|
||||
|
||||
} // namespace paddle
|
@ -0,0 +1,51 @@
|
||||
import unittest
|
||||
import numpy as np
|
||||
from op_test import OpTest
|
||||
|
||||
|
||||
class TestSeqAvgPool1D(OpTest):
    """sequence_avg_pool over a one-level LoD batch of 1-D features."""

    def setUp(self):
        self.op_type = 'sequence_avg_pool'
        # one level, batch size is 4
        data = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
        lod = [[0, 4, 5, 8, 11]]

        # Reference output: mean of each LoD segment along axis 0.
        expected = np.zeros((4, 23)).astype('float32')
        offsets = lod[0]
        for i, (begin, end) in enumerate(zip(offsets[:-1], offsets[1:])):
            expected[i] = data[begin:end, :].mean(axis=0)

        self.inputs = {'X': (data, lod)}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
|
||||
|
||||
|
||||
class TestSeqAvgPool2D(OpTest):
    """sequence_avg_pool over a one-level LoD batch of 2-D features."""

    def setUp(self):
        self.op_type = 'sequence_avg_pool'
        # one level, batch size is 4
        data = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32')
        lod = [[0, 4, 5, 8, 13]]

        # Reference output: flatten each segment to (len, 3*17), average
        # along axis 0, then restore the (3, 17) feature shape.
        expected = np.zeros((4, 3, 17)).astype('float32')
        offsets = lod[0]
        for i, (begin, end) in enumerate(zip(offsets[:-1], offsets[1:])):
            segment = np.reshape(data[begin:end, :], (-1, 3 * 17))
            expected[i] = np.reshape(segment.mean(axis=0), (3, 17))

        self.inputs = {'X': (data, lod)}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
|
||||
|
||||
|
||||
# Run all test cases in this module when it is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
Loading…
Reference in new issue