Merge pull request #6719 from tensor-tang/mkl_packed
enable MKL Packed Recurrent Layer
commit
c10023006b
MKLPackedRecurrentLayer.cpp
@@ -0,0 +1,132 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "MKLPackedRecurrentLayer.h"

namespace paddle {

REGISTER_LAYER(mkl_packed_recurrent, MKLPackedRecurrentLayer);

bool MKLPackedRecurrentLayer::init(const LayerMap& layerMap,
                                   const ParameterMap& parameterMap) {
  if (!RecurrentLayer::init(layerMap, parameterMap)) return false;
  packed_weight_.reset(new MKLPackedWeight(weight_->getW()));
  packed_weight_->pack();
  if (needGradient_) {
    packed_weightT_.reset(new MKLPackedWeight(weight_->getW(), true));
    packed_weightT_->pack();
  }
  return true;
}
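
/* RecurrentLayer::backward(callback) may update weight_ through the update
 * callback, so the packed copies are refreshed below to keep them in sync
 * with the latest weight values. */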
void MKLPackedRecurrentLayer::backward(const UpdateCallback& callback) {
  RecurrentLayer::backward(callback);
  packed_weight_->pack();
  if (needGradient_) {
    packed_weightT_->pack();
  }
}
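
/* Batch-mode forward: SequenceToBatch regroups the sequence-major output so
 * that corresponding time steps of the active sequences can be processed
 * together; each batch is then updated as out = act(in + prevBatch * W),
 * with the matrix product done by the packed-weight gemm. */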
void MKLPackedRecurrentLayer::forwardBatch(int batchSize,
                                           size_t numSequences,
                                           const int* starts) {
  if (!batchValue_) {
    batchValue_.reset(new SequenceToBatch(useGpu_));
  }

  batchValue_->resizeOrCreateBatch(batchSize, numSequences, starts, reversed_);

  batchValue_->copyFromSeq(*output_.value);

  {
    REGISTER_TIMER_INFO("RecurrentFwBatch", getName().c_str());
    /* forward one batch */
    for (size_t n = 0; n < batchValue_->getNumBatch(); n++) {
      MatrixPtr batchValue = batchValue_->getBatchValue(n);

      if (n != 0) {
        MatrixPtr preBatchValue =
            batchValue_->getBatchValue(n - 1, batchValue->getHeight());

        packed_weight_->gemm_compute(preBatchValue, batchValue);
      }
      Argument arg;
      arg.value = batchValue;
      activation_->forward(arg).check();
    }
  }
  batchValue_->copyBackSeq(*output_.value);
}
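
/* Batch-mode backward: gradients are propagated batch by batch in reverse
 * order using the packed transposed weight. The weight gradient is
 * accumulated either per batch or per sequence, whichever needs fewer gemm
 * calls (backwardByBatch is true when there are fewer batches than
 * sequences). */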
void MKLPackedRecurrentLayer::backwardBatch(int batchSize,
                                            size_t numSequences,
                                            const int* starts) {
  if (!batchGrad_) {
    batchGrad_.reset(new SequenceToBatch(useGpu_));
  }
  batchGrad_->shareIndexWith(*batchValue_);

  size_t numBatch = batchGrad_->getNumBatch();
  bool backwardByBatch = numBatch < numSequences;

  batchGrad_->copyFromSeq(*output_.grad);
  {
    REGISTER_TIMER_INFO("RecurrentBwData", getName().c_str());
    /* backward one batch */
    for (int n = (int)numBatch - 1; n >= 0; n--) {
      MatrixPtr batchGrad = batchGrad_->getBatchValue(n);
      MatrixPtr batchValue =
          batchValue_->getBatchValue(n, batchGrad->getHeight());

      Argument arg;
      arg.value = batchValue;
      arg.grad = batchGrad;
      activation_->backward(arg).check();

      if (n != 0) {
        batchValue = batchGrad_->getBatchValue(n - 1, batchGrad->getHeight());
        packed_weightT_->gemm_compute(batchGrad, batchValue);
      }

      if (backwardByBatch && weight_->getWGrad()) {
        if (n != 0) {
          /* backward weight */
          batchValue =
              batchValue_->getBatchValue(n - 1, batchGrad->getHeight());
          weight_->getWGrad()->mul(
              *batchValue->getTranspose(), *batchGrad, 1, 1);
        }
      }
    }
  }

  batchGrad_->copyBackSeq(*output_.grad);

  if (!backwardByBatch && weight_->getWGrad()) {
    REGISTER_TIMER_INFO("RecurrentBwWeight", getName().c_str());
    for (size_t seq = 0; seq < numSequences; ++seq) {
      int len = starts[seq + 1] - starts[seq];
      weight_->getWGrad()->mul(
          *output_.value
               ->subMatrix(reversed_ ? starts[seq] + 1 : starts[seq], len - 1)
               ->getTranspose(),
          *output_.grad->subMatrix(reversed_ ? starts[seq] : starts[seq] + 1,
                                   len - 1),
          1,
          1);
    }
  }
}

}  // namespace paddle
MKLPackedRecurrentLayer.h
@@ -0,0 +1,58 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "MKLPackedWeight.h"
#include "RecurrentLayer.h"

DECLARE_bool(rnn_use_batch);

namespace paddle {

/**
 * @brief MKLPackedRecurrentLayer is almost the same as RecurrentLayer,
 * but is optimized with MKL cblas packed gemm.
 * More details:
 * https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/mkl/mkl_packed.md
 */
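/**
 * Packed-gemm flow in this layer: init() packs the recurrent weight once via
 * MKLPackedWeight::pack(), forwardBatch() and backwardBatch() reuse the
 * packed buffers through gemm_compute(), and backward() re-packs after each
 * parameter update.
 */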
class MKLPackedRecurrentLayer : public RecurrentLayer {
public:
  explicit MKLPackedRecurrentLayer(const LayerConfig& config)
      : RecurrentLayer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void backward(const UpdateCallback& callback) override;

protected:
  void forwardBatch(int batchSize,
                    size_t numSequences,
                    const int* starts) override;

  void backwardBatch(int batchSize,
                     size_t numSequences,
                     const int* starts) override;

protected:
  /// packed_weight_ holds the same data as RecurrentLayer::weight_,
  /// but in MKL packed format
  std::unique_ptr<MKLPackedWeight> packed_weight_;
  /// packed_weightT_ holds the packed transpose of weight_
  std::unique_ptr<MKLPackedWeight> packed_weightT_;
};

}  // namespace paddle
MKLPackedWeight.h
@@ -0,0 +1,86 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/math/MathFunctions.h"
#include "paddle/parameter/Parameter.h"
#include "paddle/parameter/Weight.h"

namespace paddle {
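
/**
 * @brief A thin wrapper around MKL's packed gemm API. It keeps a packed copy
 * of a weight matrix (built with cblas_sgemm_pack) and multiplies inputs
 * against it with cblas_sgemm_compute, so the weight is packed once and
 * reused across many gemm calls.
 */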
class MKLPackedWeight {
protected:
  /// Pointer to the weight data
  real *weight_;
  /// Pointer to the MKL cblas packed buffer of the weight
  real *packedWeight_;
  size_t height_;
  size_t width_;
  bool transW_;

public:
  explicit MKLPackedWeight(MatrixPtr weight, bool transW = false) {
    packedWeight_ = nullptr;
    weight_ = weight->getData();
    height_ = weight->getHeight();
    width_ = weight->getWidth();
    transW_ = transW;
  }

  ~MKLPackedWeight() { free_(); }

  void pack() { pack_(weight_); }
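
  /// Computes dst += src * W, where W is the packed weight
  /// (its transpose if transW_ is set).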
  void gemm_compute(const MatrixPtr src, MatrixPtr dst) {
    cblas_sgemm_compute(CblasRowMajor,
                        CblasNoTrans,
                        CblasPacked,
                        src->getHeight(),
                        transW_ ? height_ : width_,
                        transW_ ? width_ : height_,
                        src->getData(),
                        src->getWidth(),
                        packedWeight_,
                        width_,
                        1.0,
                        dst->getData(),
                        dst->getWidth());
  }

protected:
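  /// Allocates the MKL packed buffer on first use, then fills it from src via
  /// cblas_sgemm_pack (packing the weight as the gemm B operand, transposed
  /// when transW_ is set).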
  void pack_(real *src) {
    if (!packedWeight_) {
      packedWeight_ = cblas_sgemm_alloc(CblasBMatrix, 1, width_, height_);
    }
    cblas_sgemm_pack(CblasRowMajor,
                     CblasBMatrix,
                     transW_ ? CblasTrans : CblasNoTrans,
                     1,
                     transW_ ? height_ : width_,
                     transW_ ? width_ : height_,
                     1.0,
                     src,
                     width_,
                     packedWeight_);
  }

  void free_() {
    if (packedWeight_) {
      cblas_sgemm_free(packedWeight_);
    }
  }
};

}  // namespace paddle
RecurrentLayer.h
@@ -0,0 +1,130 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <gflags/gflags.h>
#include "Layer.h"
#include "SequenceToBatch.h"
#include "paddle/utils/Stat.h"

namespace paddle {
/**
 * @brief RecurrentLayer takes one input layer. The output size is the same as
 * that of the input layer.
 * For each sequence [start, end] it performs the following computation:
 * \f[
 * out_{i} = act(in_{i}) \ \ \text{for} \ i = start \\
 * out_{i} = act(in_{i} + out_{i-1} * W) \ \ \text{for} \ start < i <= end
 * \f]
 * If reversed is true, the order is reversed:
 * \f[
 * out_{i} = act(in_{i}) \ \ \text{for} \ i = end \\
 * out_{i} = act(in_{i} + out_{i+1} * W) \ \ \text{for} \ start <= i < end
 * \f]
 * There are two ways to compute the rnn. One is to compute it one sequence at
 * a time; the other is to reorganize the input into batches and compute it
 * one batch at a time. Users can select between them with the rnn_use_batch
 * flag.
 */
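/**
 * A rough illustration of the batch method: SequenceToBatch regroups the
 * input so that corresponding time steps of the active sequences are computed
 * together; e.g. two sequences of lengths 3 and 2 yield batches of sizes 2,
 * 2 and 1, so each time step needs one gemm instead of one per sequence (see
 * SequenceToBatch for the exact layout).
 */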
class RecurrentLayer : public Layer {
public:
  explicit RecurrentLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;

  void backward(const UpdateCallback& callback) override;

  void resetState() override;

  void setState(LayerStatePtr state) override;

  LayerStatePtr getState() override;

protected:
  /**
   * @brief If the user does not set --rnn_use_batch=true, the rnn forward
   * pass is computed one sequence at a time by default.
   * @param batchSize Total number of words of all samples in this batch.
   * @param numSequences The number of samples (sequences).
   * @param starts The start position of each sample.
   */
  void forwardSequence(int batchSize, size_t numSequences, const int* starts);
  /**
   * @brief Compute the rnn forward pass for one sequence.
   * @param start The start position of this sequence (or sample).
   * @param length The length of this sequence (or sample), i.e. the number
   * of words in this sequence.
   */
  void forwardOneSequence(int start, int length);
  /**
   * @brief Compute the rnn backward pass one sequence at a time.
   * @param batchSize Total number of words of all samples in this batch.
   * @param numSequences The number of samples (sequences).
   * @param starts The start position of each sample.
   */
  void backwardSequence(int batchSize, size_t numSequences, const int* starts);
  /**
   * @brief Compute the rnn backward pass for one sequence.
   * @param start The start position of this sequence (or sample).
   * @param length The length of this sequence (or sample), i.e. the number
   * of words in this sequence.
   */
  void backwardOneSequence(int start, int length);

  /**
   * @brief Reorganize the input into batches and compute the rnn forward pass
   * batch by batch. The output is converted back from batch shape to sequence
   * shape after the forward pass finishes. See the SequenceToBatch class for
   * details of the batch layout.
   * @param batchSize Total number of words of all samples in this batch.
   * @param numSequences The number of samples (sequences).
   * @param starts The start position of each sample.
   */
  virtual void forwardBatch(int batchSize,
                            size_t numSequences,
                            const int* starts);

  /**
   * @brief Reorganize the input into batches and compute the rnn backward
   * pass batch by batch.
   * @param batchSize Total number of words of all samples in this batch.
   * @param numSequences The number of samples (sequences).
   * @param starts The start position of each sample.
   */
  virtual void backwardBatch(int batchSize,
                             size_t numSequences,
                             const int* starts);
protected:
  std::unique_ptr<Weight> weight_;
  std::unique_ptr<Weight> bias_;

  /// frameOutput_[i] is used to hold the i-th sample of output_
  std::vector<Argument> frameOutput_;
  MatrixPtr prevOutput_;
  /// Whether to compute the rnn in reverse order.
  bool reversed_;
  /// When computing batch by batch, batchValue_ stores the reorganized
  /// input values.
  std::unique_ptr<SequenceToBatch> batchValue_;
  /// When computing batch by batch, batchGrad_ stores the gradients with
  /// respect to the reorganized input values.
  std::unique_ptr<SequenceToBatch> batchGrad_;
};

}  // namespace paddle