|
|
|
@ -13,53 +13,13 @@ See the License for the specific language governing permissions and
|
|
|
|
|
limitations under the License. */
|
|
|
|
|
|
|
|
|
|
#include "Layer.h"
|
|
|
|
|
#include "NormLayer.h"
|
|
|
|
|
#include "paddle/math/BaseMatrix.h"
|
|
|
|
|
#include "paddle/math/Matrix.h"
|
|
|
|
|
|
|
|
|
|
namespace paddle {
|
|
|
|
|
/**
|
|
|
|
|
* This layer applys normalize across the channels of each sample to a
|
|
|
|
|
* conv layer's output and scale the output by a group of trainable factors
|
|
|
|
|
* which dimensions equal to the channel's number.
|
|
|
|
|
* - Input: One and only one input layer are accepted. The input layer must be
|
|
|
|
|
* be a data output layer.
|
|
|
|
|
* - Output: The normalized data of the input data.
|
|
|
|
|
* Reference:
|
|
|
|
|
* Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
|
|
|
|
|
* Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
class NormalizeLayer : public Layer {
|
|
|
|
|
public:
|
|
|
|
|
explicit NormalizeLayer(const LayerConfig& config) : Layer(config) {}
|
|
|
|
|
bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
|
|
|
|
|
|
|
|
|
|
void forward(PassType passType);
|
|
|
|
|
void backward(const UpdateCallback& callback);
|
|
|
|
|
|
|
|
|
|
protected:
|
|
|
|
|
size_t channels_;
|
|
|
|
|
std::unique_ptr<Weight> scale_;
|
|
|
|
|
MatrixPtr scaleDiff_;
|
|
|
|
|
MatrixPtr normBuffer_;
|
|
|
|
|
MatrixPtr dataBuffer_;
|
|
|
|
|
MatrixPtr channelBuffer_;
|
|
|
|
|
MatrixPtr spatialBuffer_;
|
|
|
|
|
MatrixPtr sampleBuffer_;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Register this layer with the framework under the config type "normalize".
REGISTER_LAYER(normalize, NormalizeLayer);
|
|
|
|
|
|
|
|
|
|
bool NormalizeLayer::init(const LayerMap& layerMap,
|
|
|
|
|
const ParameterMap& parameterMap) {
|
|
|
|
|
Layer::init(layerMap, parameterMap);
|
|
|
|
|
CHECK(parameters_[0]);
|
|
|
|
|
channels_ = config_.num_filters();
|
|
|
|
|
scale_.reset(new Weight(channels_, 1, parameters_[0]));
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void NormalizeLayer::forward(PassType passType) {
|
|
|
|
|
void CrossChannelNormLayer::forward(PassType passType) {
|
|
|
|
|
Layer::forward(passType);
|
|
|
|
|
auto in = getInput(0);
|
|
|
|
|
MatrixPtr inV = getInputValue(0);
|
|
|
|
@ -74,16 +34,12 @@ void NormalizeLayer::forward(PassType passType) {
|
|
|
|
|
|
|
|
|
|
Matrix::resizeOrCreate(dataBuffer_, batchSize, dataDim, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(spatialBuffer_, 1, spatialDim, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(channelBuffer_, channels_, 1, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(sampleBuffer_, channels_, spatialDim, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(normBuffer_, batchSize, spatialDim, false, useGpu_);
|
|
|
|
|
normBuffer_->zeroMem();
|
|
|
|
|
spatialBuffer_->zeroMem();
|
|
|
|
|
sampleBuffer_->zeroMem();
|
|
|
|
|
dataBuffer_->zeroMem();
|
|
|
|
|
// add eps to avoid overflow
|
|
|
|
|
normBuffer_->addScalar(*normBuffer_, 1e-6);
|
|
|
|
|
channelBuffer_->resetOne();
|
|
|
|
|
inV->square2(*dataBuffer_);
|
|
|
|
|
for (size_t i = 0; i < batchSize; i++) {
|
|
|
|
|
spatialBuffer_->zeroMem();
|
|
|
|
@ -102,18 +58,14 @@ void NormalizeLayer::forward(PassType passType) {
|
|
|
|
|
spatialBuffer_->sumCols(*dataTmp, 1, 1);
|
|
|
|
|
spatialBuffer_->sqrt2(*spatialBuffer_);
|
|
|
|
|
normTmp->copyFrom(*spatialBuffer_);
|
|
|
|
|
sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
|
|
|
|
|
sampleBuffer_->dotDiv(*inTmp, *sampleBuffer_);
|
|
|
|
|
outTmp->copyFrom(*sampleBuffer_);
|
|
|
|
|
|
|
|
|
|
outTmp->copyFrom(*inTmp);
|
|
|
|
|
outTmp->divRowVector(*spatialBuffer_);
|
|
|
|
|
// scale the layer.
|
|
|
|
|
spatialBuffer_->resetOne();
|
|
|
|
|
sampleBuffer_->mul(*scale_->getW(), *spatialBuffer_, 1., 0.);
|
|
|
|
|
outTmp->dotMul(*outTmp, *sampleBuffer_);
|
|
|
|
|
outTmp->mulColVector(*scale_->getW());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void NormalizeLayer::backward(const UpdateCallback& callback) {
|
|
|
|
|
void CrossChannelNormLayer::backward(const UpdateCallback& callback) {
|
|
|
|
|
MatrixPtr inG = getInputGrad(0);
|
|
|
|
|
MatrixPtr inV = getInputValue(0);
|
|
|
|
|
MatrixPtr outG = getOutputGrad();
|
|
|
|
@ -124,9 +76,10 @@ void NormalizeLayer::backward(const UpdateCallback& callback) {
|
|
|
|
|
size_t dataDim = inG->getWidth();
|
|
|
|
|
size_t spatialDim = dataDim / channels_;
|
|
|
|
|
|
|
|
|
|
bool syncFlag = hl_get_sync_flag();
|
|
|
|
|
dataBuffer_->dotMul(*outG, *outV);
|
|
|
|
|
Matrix::resizeOrCreate(scaleDiff_, channels_, 1, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(channelBuffer_, channels_, 1, false, useGpu_);
|
|
|
|
|
Matrix::resizeOrCreate(sampleBuffer_, channels_, spatialDim, false, useGpu_);
|
|
|
|
|
scaleDiff_->zeroMem();
|
|
|
|
|
for (size_t i = 0; i < batchSize; i++) {
|
|
|
|
|
spatialBuffer_->zeroMem();
|
|
|
|
@ -154,28 +107,20 @@ void NormalizeLayer::backward(const UpdateCallback& callback) {
|
|
|
|
|
sampleBuffer_->dotMul(*inValueTmp, *outGradTmp);
|
|
|
|
|
spatialBuffer_->sumCols(*sampleBuffer_, 1., 1.);
|
|
|
|
|
// scale the grad
|
|
|
|
|
channelBuffer_->resetOne();
|
|
|
|
|
sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
|
|
|
|
|
|
|
|
|
|
inGradTmp->dotMul(*inValueTmp, *sampleBuffer_);
|
|
|
|
|
inGradTmp->copyFrom(*inValueTmp);
|
|
|
|
|
inGradTmp->mulRowVector(*spatialBuffer_);
|
|
|
|
|
// divide by square of norm
|
|
|
|
|
spatialBuffer_->dotMul(*normTmp, *normTmp);
|
|
|
|
|
sampleBuffer_->mul(*channelBuffer_, *spatialBuffer_, 1., 0.);
|
|
|
|
|
inGradTmp->dotDiv(*inGradTmp, *sampleBuffer_);
|
|
|
|
|
inGradTmp->divRowVector(*spatialBuffer_);
|
|
|
|
|
// subtract
|
|
|
|
|
inGradTmp->add(*outGradTmp, -1, 1);
|
|
|
|
|
// divide by norm
|
|
|
|
|
sampleBuffer_->mul(*channelBuffer_, *normTmp, 1., 0.);
|
|
|
|
|
inGradTmp->dotDiv(*inGradTmp, *sampleBuffer_);
|
|
|
|
|
inGradTmp->divRowVector(*normTmp);
|
|
|
|
|
// scale the diff
|
|
|
|
|
spatialBuffer_->resetOne();
|
|
|
|
|
sampleBuffer_->mul(*scale_->getW(), *spatialBuffer_, 1., 0.);
|
|
|
|
|
inGradTmp->dotMul(*inGradTmp, *sampleBuffer_);
|
|
|
|
|
inGradTmp->mulColVector(*scale_->getW());
|
|
|
|
|
}
|
|
|
|
|
// updata scale
|
|
|
|
|
if (scale_->getWGrad()) scale_->getWGrad()->copyFrom(*scaleDiff_);
|
|
|
|
|
hl_set_sync_flag(false);
|
|
|
|
|
hl_set_sync_flag(syncFlag);
|
|
|
|
|
scale_->getParameterPtr()->incUpdate(callback);
|
|
|
|
|
}
|
|
|
|
|
|