From 735737d28369d6040d0bacbae9973052e51cd7af Mon Sep 17 00:00:00 2001
From: caoying03
Date: Fri, 29 Sep 2017 21:33:19 +0800
Subject: [PATCH 01/35] initialize crf operator.

---
 paddle/operators/crf_op.cc | 48 +++++++++++++++++++
 paddle/operators/crf_op.h | 41 ++++++++++++++++
 .../paddle/v2/framework/tests/test_crf_op.py | 15 +++++
 3 files changed, 104 insertions(+)
 create mode 100644 paddle/operators/crf_op.cc
 create mode 100644 paddle/operators/crf_op.h
 create mode 100644 python/paddle/v2/framework/tests/test_crf_op.py

diff --git a/paddle/operators/crf_op.cc b/paddle/operators/crf_op.cc
new file mode 100644
index 0000000000..21ffcf48c0
--- /dev/null
+++ b/paddle/operators/crf_op.cc
@@ -0,0 +1,48 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. */
+
+#include "paddle/operators/crf_op.h"
+
+namespace paddle {
+namespace operators {
+
+class CrfOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  CrfOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {}
+};
+
+class CrfOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase* ctx) const override {}
+};
+
+class CrfGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase* ctx) const override {}
+};
+
+} // namespace operators
+} // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP(crf, ops::CrfOp, ops::CrfOpMaker, crf_grad, ops::CrfGradOp);
+REGISTER_OP_CPU_KERNEL(crf, ops::CrfOpKernel);
+REGISTER_OP_CPU_KERNEL(crf_grad, ops::CrfGradOpKernel);
diff --git a/paddle/operators/crf_op.h b/paddle/operators/crf_op.h
new file mode 100644
index 0000000000..cb34c5c6a3
--- /dev/null
+++ b/paddle/operators/crf_op.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
+
+#pragma once
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+class CrfOpKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+  }
+};
+
+template <typename T>
+class CrfGradOpKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+  }
+};
+
+} // namespace operators
+} // namespace paddle
diff --git a/python/paddle/v2/framework/tests/test_crf_op.py b/python/paddle/v2/framework/tests/test_crf_op.py
new file mode 100644
index 0000000000..47c9341fa0
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_crf_op.py
@@ -0,0 +1,15 @@
+import unittest
+import numpy as np
+
+from op_test import OpTest
+
+
+class TestCrfOp(OpTest):
+    def setUp(self):
+        self.op_type = "crf"
+        batch_size = 3
+        class_num = 37
+
+
+if __name__ == "__main__":
+    unittest.main()
From d92c671d5f7fd8a14492856a2800c9e407078144 Mon Sep 17 00:00:00 2001
From: caoying03
Date: Tue, 10 Oct 2017 10:10:37 +0800
Subject: [PATCH 02/35] add python forward unittest.

---
 paddle/operators/crf_op.cc | 48 ------
 paddle/operators/linear_chain_crf_op.cc | 141 ++++++++++++++++++
 .../{crf_op.h => linear_chain_crf_op.h} | 4 +-
 .../softmax_with_cross_entropy_op.cc | 6 +-
 .../paddle/v2/framework/tests/test_crf_op.py | 15 --
 .../tests/test_linear_chain_crf_op.py | 122 +++++++++++++++
 6 files changed, 268 insertions(+), 68 deletions(-)
 delete mode 100644 paddle/operators/crf_op.cc
 create mode 100644 paddle/operators/linear_chain_crf_op.cc
 rename paddle/operators/{crf_op.h => linear_chain_crf_op.h} (90%)
 delete mode 100644 python/paddle/v2/framework/tests/test_crf_op.py
 create mode 100644 python/paddle/v2/framework/tests/test_linear_chain_crf_op.py

diff --git a/paddle/operators/crf_op.cc b/paddle/operators/crf_op.cc
deleted file mode 100644
index 21ffcf48c0..0000000000
--- a/paddle/operators/crf_op.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
*/
-
-#include "paddle/operators/crf_op.h"
-
-namespace paddle {
-namespace operators {
-
-class CrfOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  CrfOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {}
-};
-
-class CrfOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {}
-};
-
-class CrfGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContextBase* ctx) const override {}
-};
-
-} // namespace operators
-} // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OP(crf, ops::CrfOp, ops::CrfOpMaker, crf_grad, ops::CrfGradOp);
-REGISTER_OP_CPU_KERNEL(crf, ops::CrfOpKernel);
-REGISTER_OP_CPU_KERNEL(crf_grad, ops::CrfGradOpKernel);
diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc
new file mode 100644
index 0000000000..434382a72f
--- /dev/null
+++ b/paddle/operators/linear_chain_crf_op.cc
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/linear_chain_crf_op.h"
+
+namespace paddle {
+namespace operators {
+
+class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  LinearChainCrfOpMaker(framework::OpProto* proto,
+                        framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput(
+        "Emission",
+        "(LoDTensor, default: LoDTensor<float>). "
+        "The unscaled emission weight matrix for the linear chain CRF. "
+        "This input is a LoDTensor with shape [N x D] where N is the total "
+        "element number of all input sequences in a mini-batch, "
+        "and D is the total tag number.");
+    AddInput(
+        "Transition",
+        "(Tensor, default: Tensor<float>). A Tensor with shape [(D + 2) x D]. "
+        "The learnable parameter for linear_chain_crf operator. "
+        "See more details in the operator's comments.");
+    AddInput(
+        "Label",
+        "(LoDTensor, default: LoDTensor<int>). The ground truth which is a 2-D "
+        "LoDTensor with shape [N x 1], where N is the total element number in "
+        "a mini-batch.");
+    AddOutput(
+        "Alpha",
+        "Tensor, default: Tensor<float>. The forward vectors for the entire "
+        "batch. A two dimensional tensor with shape [N x D], "
+        "denoted as \f$\alpha\f$. \f$\alpha\f$ is a memo table used to "
+        "calculate the normalization factor in CRF. \f$\alpha[k, v]\f$ stores "
+        "the unnormalized probabilities of all possible unfinished sequences of "
+        "tags that end at position \f$k\f$ with tag \f$v\f$. For each \f$k\f$, "
+        "\f$\alpha[k, v]\f$ is a vector of length \f$D\f$ with a component for "
+        "each tag value \f$v\f$.
This vector is called a forward vector and "
+        "will also be used in backward computations.")
+        .AsIntermediate();
+    AddOutput(
+        "LogLikelihood",
+        "(Tensor, default: Tensor<float>). The logarithm of the conditional "
+        "likelihood of each training sample in a mini-batch. This is a 2-D "
+        "tensor with shape [S x 1], where S is the sequence number in a "
+        "mini-batch. "
+        "Note: S is equal to the sequence number in a mini-batch. The output "
+        "is no longer a LoDTensor.");
+    AddComment(R"DOC(
+Conditional Random Field defines an undirected probabilistic graph with nodes
+denoting random variables and edges denoting dependencies between these
+variables. CRF learns the conditional probability \f$P(Y|X)\f$, where
+\f$X = (x_1, x_2, ... , x_n)\f$ are structured inputs and
+\f$Y = (y_1, y_2, ... , y_n)\f$ are labels for the inputs.
+
+Linear chain CRF is a special case of CRF that is useful for sequence labeling
+tasks. Sequence labeling tasks do not assume many conditional
+independences among inputs. They are only concerned with the input and the
+output being linear sequences. Thus, the graph model of CRF is a simple chain
+or a line, which results in a linear chain CRF.
+
+This operator implements the Forward-Backward algorithm for linear chain CRF.
+Please see http://www.cs.columbia.edu/~mcollins/fb.pdf for reference.
+
+Equation:
+
+- Denote the first input of this operator (Emission) as \f$x\f$ here.
+- The first D values of the second input (Transition) of this operator are for
+starting weights, denoted as \f$a\f$ here.
+- The next D values of the second input (Transition) of this operator are for
+ending weights, denoted as \f$b\f$ here.
+- The remaining values of the second input (Transition) are for transition
+weights, denoted as \f$w\f$ here.
+- Denote the third input of this operator (Label) as \f$s\f$ here.
+
+The probability of a sequence \f$s\f$ of length \f$L\f$ is defined as:
+\f$P(s) = (1/Z) exp(a_{s_1} + b_{s_L}
+         + \sum_{l=1}^L x_{s_l}
+         + \sum_{l=2}^L w_{s_{l-1},s_l})\f$
+where \f$Z\f$ is a normalization value so that the sum of \f$P(s)\f$ over
+all possible sequences is \f$1\f$, and \f$x\f$ is the emission feature weight
+to the linear chain CRF.
+
+Finally, the linear chain CRF operator outputs the logarithm of the conditional
+likelihood of each training sample in a mini-batch.
+
+NOTE:
+1. The feature function for a CRF is made up of the emission features and the
+transition features. The emission feature weights are NOT computed in
+this operator. They MUST be computed first before this operator is called.
+
+2. Because this operator performs global normalization over all possible
+sequences internally, it expects UNSCALED emission feature weights.
+Please do not call this op with the emission feature being output of any
+nonlinear activation.
+
+3. The 2nd dimension of the first input of this operator (Emission) MUST be
+equal to the tag number.
+
+)DOC");
+  }
+};
+
+class LinearChainCrfOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase* ctx) const override {}
+};
+
+class LinearChainCrfGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContextBase* ctx) const override {}
+};
+
+} // namespace operators
+} // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP(linear_chain_crf, ops::LinearChainCrfOp, ops::LinearChainCrfOpMaker,
+            linear_chain_crf_grad, ops::LinearChainCrfGradOp);
+REGISTER_OP_CPU_KERNEL(linear_chain_crf, ops::LinearChainCrfOpKernel);
+REGISTER_OP_CPU_KERNEL(linear_chain_crf_grad,
+                       ops::LinearChainCrfGradOpKernel);
diff --git a/paddle/operators/crf_op.h b/paddle/operators/linear_chain_crf_op.h
similarity index 90%
rename from paddle/operators/crf_op.h
rename to paddle/operators/linear_chain_crf_op.h
index cb34c5c6a3..1c0749114f 100644
--- a/paddle/operators/crf_op.h
+++ b/paddle/operators/linear_chain_crf_op.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace operators {
 
 template <typename T>
-class CrfOpKernel : public framework::OpKernel {
+class LinearChainCrfOpKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
@@ -29,7 +29,7 @@ class CrfOpKernel : public framework::OpKernel {
 };
 
 template <typename T>
-class CrfGradOpKernel : public framework::OpKernel {
+class LinearChainCrfGradOpKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc
index 42c1ba6fdf..ba81dd4c2d 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/operators/softmax_with_cross_entropy_op.cc
@@ -32,9 +32,9 @@ class SoftmaxWithCrossEntropyOpMaker
   AddInput("Label",
            "(Tensor, default: Tensor), The ground truth which is a 2-D "
            "tensor. "
-           "If softLable is set to 0, Label is a Tensor with shape [N x "
-           "1]. "
-           "If softLable is set to 1, Label is a Tensor "
+           "If softLabel is set to false, Label is a Tensor with shape "
+           "[N x 1]."
+           "If softLabel is set to true, Label is a Tensor "
+           "with shape [N x K].");
   AddOutput(
       "Softmax",
diff --git a/python/paddle/v2/framework/tests/test_crf_op.py b/python/paddle/v2/framework/tests/test_crf_op.py
deleted file mode 100644
index 47c9341fa0..0000000000
--- a/python/paddle/v2/framework/tests/test_crf_op.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import unittest
-import numpy as np
-
-from op_test import OpTest
-
-
-class TestCrfOp(OpTest):
-    def setUp(self):
-        self.op_type = "crf"
-        batch_size = 3
-        class_num = 37
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
new file mode 100644
index 0000000000..b16c4d40b9
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
@@ -0,0 +1,122 @@
+import unittest
+import random
+import numpy as np
+
+from op_test import OpTest
+
+
+class LinearChainCrfForward(object):
+    def __init__(self, seq_start_positions, emission_weights,
+                 transition_weights, labels):
+        self.tag_num = emission_weights.shape[1]
+        self.seq_num = len(seq_start_positions) - 1
+
+        self.seq_start_positions = seq_start_positions
+        self.labels = labels
+        self.x = emission_weights
+
+        self.x_row_max = np.amax(self.x, axis=1, keepdims=True)
+        self.x_exps = np.exp(self.x - self.x_row_max)
+
+        # unnormalized logits of the transition weights for the start mark.
+        self.a = transition_weights[0, :]
+        self.a_exps = np.exp(self.a)
+        # unnormalized logits of the transition weights for the end mark.
+        self.b = transition_weights[1, :]
+        self.b_exps = np.exp(self.b)
+        # unnormalized logits of the transition weights for all the other tags.
+        self.w = transition_weights[2:, :]
+        self.w_exps = np.exp(self.w)
+
+        # The output of linear chain crf operator.
+        # alpha is a memo table in dynamic programming to calculate
+        # normalization factor.
+        self.alpha = np.zeros(
+            (seq_start_positions[-1], self.tag_num), dtype="float32")
+        self.log_likelihood = np.zeros((self.tag_num, 1))
+
+    def _l1_norm(self, x):
+        s = np.sum(x)
+        x /= s
+        return s
+
+    def _forward_a_sequence(self, x, x_row_max, x_exps, label, alpha):
+        seq_len = x_row_max.shape[0]
+        log_likelihood = 0.
+
+        for i in range(self.tag_num):
+            alpha[0, i] = self.a_exps[i] * x_exps[0, i]
+        log_likelihood = -x_row_max[0] - np.log(self._l1_norm(alpha[0, :]))
+
+        # calculate the unnormalized logits of the normalization factor.
+        for k in range(1, seq_len):
+            for i in range(self.tag_num):
+                s = 0.
+                for j in range(self.tag_num):
+                    s += alpha[k - 1, j] * self.w_exps[j, i]
+                alpha[k, i] = x_exps[k, i] * s
+            log_likelihood -= x_row_max[k] + np.log(self._l1_norm(alpha[k, :]))
+        s = 0.
+        for i in range(self.tag_num):
+            s += alpha[-1, i] * self.b_exps[i]
+        log_likelihood -= np.log(s)
+
+        # calculate the noninator part.
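+        # (A sketch of what this term is, in the notation of the operator's
+        #  comment: the path score of the labeled sequence, i.e. the start
+        #  transition a[s_0], the emissions x[k, s_k], the pairwise
+        #  transitions w[s_{k-1}, s_k], and the end transition b[s_L].)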
+ log_likelihood += ( + self.a[label[0]] + self.x[0, label[0]] + self.b[label[-1]]) + for k in range(1, seq_len): + log_likelihood += ( + self.x[k, label[k]] + self.w[label[k - 1], label[k]]) + return log_likelihood + + def crf_forward_compute(self): + for i in range(self.seq_num): + start = self.seq_start_positions[i] + end = self.seq_start_positions[i + 1] + + self.log_likelihood[i] = self._forward_a_sequence( + self.x[start:end], self.x_row_max[start:end, :], + self.x_exps[start:end, :], self.labels[start:end, :], + self.alpha[start:end, :]) + return self.alpha, self.log_likelihood + + +class TestLinearChainCrfOp(OpTest): + def set_test_data(self): + SEQ_NUM = 3 + TAG_NUM = 17 + MAX_SEQ_LEN = 13 + + # the linear_chain_crf operator only supports sequence (LoD level = 1) + lod = [[0]] + for i in range(SEQ_NUM): + lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) + + emission = np.random.uniform(-1, 1, + [lod[-1][-1], TAG_NUM]).astype("float32") + transition = np.random.uniform(-0.5, 0.5, + [TAG_NUM + 2, TAG_NUM]).astype("float32") + labels = np.random.randint( + low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int32") + + self.inputs = { + "Emission": (emission, lod), + "Transition": transition, + "label": (labels, lod) + } + + crf = LinearChainCrfForward(lod[0], emission, transition, labels) + alpha, log_likelihood = crf.crf_forward_compute() + + self.outputs = {"Alpha": alpha, "LogLikelihood": log_likelihood} + + def setUp(self): + self.op_type = "linear_chain_crf" + self.set_test_data() + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() From 91cc5d6208f55bb950d18f359e379002968f6cf9 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Thu, 12 Oct 2017 10:54:06 +0800 Subject: [PATCH 03/35] add the forward operator. --- paddle/operators/linear_chain_crf_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 434382a72f..fd47398065 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -119,7 +119,7 @@ class LinearChainCrfOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override {} + void InferShape(framework::InferShapeContext* ctx) const override {} }; class LinearChainCrfGradOp : public framework::OperatorWithKernel { @@ -127,7 +127,7 @@ class LinearChainCrfGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContextBase* ctx) const override {} + void InferShape(framework::InferShapeContext* ctx) const override {} }; } // namespace operators From cc220eec367795c63a287118adffdba107cae9d5 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Thu, 12 Oct 2017 20:23:18 +0800 Subject: [PATCH 04/35] add forward computation of crf operator. 
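
This patch implements the forward (normalization) computation in C++. For
reference, the recursion it evaluates can be written as the following NumPy
sketch (illustrative names only; the C++ kernel additionally subtracts the
per-row maximum before exponentiation for numerical stability):

    import numpy as np

    def crf_log_likelihood(x, a, b, w, labels):
        # x: [L, D] unscaled emissions; a, b: [D] start/end weights;
        # w: [D, D] tag-to-tag transitions; labels: [L] gold tag ids.
        L, D = x.shape
        alpha = np.exp(a + x[0])  # alpha[0, i] = exp(a_i + x[0, i])
        log_z = 0.0
        for k in range(1, L):
            alpha = np.exp(x[k]) * (alpha @ np.exp(w))
            s = alpha.sum()  # rescale to avoid underflow; keep log(s)
            alpha /= s
            log_z += np.log(s)
        log_z += np.log((alpha * np.exp(b)).sum())
        score = (a[labels[0]] + x[np.arange(L), labels].sum() +
                 w[labels[:-1], labels[1:]].sum() + b[labels[-1]])
        return score - log_z  # the operator returns the negative of this
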
---
 paddle/framework/tensor.h | 11 +-
 paddle/framework/tensor_impl.h | 7 +-
 paddle/operators/cross_entropy_op.cc | 2 +-
 paddle/operators/linear_chain_crf_op.cc | 214 ++++++++++++++++--
 paddle/operators/linear_chain_crf_op.h | 26 ++-
 .../softmax_with_cross_entropy_op.cc | 14 +-
 .../tests/test_linear_chain_crf_op.py | 6 +-
 7 files changed, 231 insertions(+), 49 deletions(-)

diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 3304d857ae..3962d55324 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -114,16 +114,19 @@ class Tensor {
                    const platform::DeviceContext& ctx);
 
   /**
-   * @brief Return the slice of the tensor.
+   * @brief Return a sub-tensor of the given tensor.
    *
-   * @param[in] begin_idx The begin index of the slice.
-   * @param[in] end_idx The end index of the slice.
+   * @param[in] begin_idx The index of the start row (inclusive) to slice.
+   *                      The index number begins from 0.
+   * @param[in] end_idx The index of the end row (exclusive) to slice.
+   *                    The index number begins from 0.
    */
   template <typename T>
   inline Tensor Slice(const int& begin_idx, const int& end_idx) const;
 
   platform::Place place() const {
-    PADDLE_ENFORCE_NOT_NULL(holder_, "Tensor get place() must contains holder");
+    PADDLE_ENFORCE_NOT_NULL(
+        holder_, "A holder must exist when calling the method place().");
     return holder_->place();
   }
diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h
index ce73e0a9ed..635a84f415 100644
--- a/paddle/framework/tensor_impl.h
+++ b/paddle/framework/tensor_impl.h
@@ -168,10 +168,11 @@ inline void Tensor::CopyFromVector(const std::vector<T>& src,
 template <typename T>
 inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
   check_memory_size();
-  PADDLE_ENFORCE_GE(begin_idx, 0, "Slice begin index is less than zero.");
-  PADDLE_ENFORCE_LE(end_idx, dims_[0], "Slice end index is out of bound.");
+  PADDLE_ENFORCE_GE(begin_idx, 0,
+                    "The start row index must be greater than or equal to 0.");
+  PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound.");
   PADDLE_ENFORCE_LT(begin_idx, end_idx,
-                    "Begin index must be less than end index.");
+                    "The start row index must be less than the end row index.");
 
   if (dims_[0] == 1) {
     return *this;
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 6a13f82cce..b4ea0338b2 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -49,7 +49,7 @@ class CrossEntropyOp : public framework::OperatorWithKernel {
     ctx->ShareLoD("X", /*->*/ "Y");
   }
 
-  // Explicitly set data type of output of the cross_entropy operator
+  // Explicitly set that the data type of the output of the cross_entropy operator
   // is determined by its input "X".
diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc
index bdff6ffc6a..b451ae62e2 100644
--- a/paddle/operators/linear_chain_crf_op.cc
+++ b/paddle/operators/linear_chain_crf_op.cc
@@ -17,6 +17,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
+using framework::LoDTensor;
+using framework::LoD;
+
 class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   LinearChainCrfOpMaker(framework::OpProto* proto,
@@ -77,14 +80,14 @@ Please see http://www.cs.columbia.edu/~mcollins/fb.pdf for reference.
 
 Equation:
 
-- Denote the first input of this operator (Emission) as \f$x\f$ here.
-- The first D values of the second input (Transition) of this operator are for
-starting weights, denoted as \f$a\f$ here.
-- The next D values of the second input (Transition) of this operator are for
-ending weights, denoted as \f$b\f$ here.
-- The remaining values of the second input (Transition) are for transition
-weights, denoted as \f$w\f$ here.
-- Denote the third input of this operator (Label) as \f$s\f$ here.
+- Denote Input(Emission) to this operator as \f$x\f$ here.
+- The first D values of Input(Transition) to this operator are for starting
+weights, denoted as \f$a\f$ here.
+- The next D values of Input(Transition) of this operator are for ending
+weights, denoted as \f$b\f$ here.
+- The remaining values of Input(Transition) are for transition weights,
+denoted as \f$w\f$ here.
+- Denote Input(Label) as \f$s\f$ here.
 
 The probability of a sequence \f$s\f$ of length \f$L\f$ is defined as:
 \f$P(s) = (1/Z) exp(a_{s_1} + b_{s_L}
          + \sum_{l=1}^L x_{s_l}
          + \sum_{l=2}^L w_{s_{l-1},s_l})\f$
 where \f$Z\f$ is a normalization value so that the sum of \f$P(s)\f$ over
 all possible sequences is \f$1\f$, and \f$x\f$ is the emission feature weight
 to the linear chain CRF.
 
 Finally, the linear chain CRF operator outputs the logarithm of the conditional
 likelihood of each training sample in a mini-batch.
 
 NOTE:
 1. The feature function for a CRF is made up of the emission features and the
 transition features. The emission feature weights are NOT computed in
 this operator. They MUST be computed first before this operator is called.
 
 2. Because this operator performs global normalization over all possible
 sequences internally, it expects UNSCALED emission feature weights.
 Please do not call this op with the emission feature being output of any
 nonlinear activation.
 
-3. The 2nd dimension of the first input of this operator (Emission) MUST be
-equal to the tag number.
+3. The 2nd dimension of Input(Emission) MUST be equal to the tag number.
 
 )DOC");
   }
 };
 
@@ -136,33 +138,188 @@ class LinearChainCrfOp : public framework::OperatorWithKernel {
     auto label_dims = ctx->GetInputDim("Label");
 
     PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL,
-                      "The input Emission should be a 2-D tensor.");
+                      "The Input(Emission) should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
-                      "The input Transition should be a 2-D tensor.");
+                      "The Input(Transition) should be a 2-D tensor.");
     PADDLE_ENFORCE_EQ(
-        transition_dims[0] + 2, transition_dims[1],
-        "An invalid dimension for the input Transition, which should "
+        transition_dims[0] - 2, transition_dims[1],
+        "An invalid dimension for the Input(Transition), which should "
         "be a 2-D tensor with shape [D + 2 x D].");
     PADDLE_ENFORCE_EQ(
         emission_dims[1], transition_dims[1],
-        "The 2nd dimension of the input Emission and the input Transition "
+        "The 2nd dimension of the Input(Emission) and the Input(Transition) "
        "should be equal to the tag number.");
     PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL,
-                   "The input Label should be a 2-D tensor "
-                   "with the 2nd dimensions fixed to 1.");
+                   "The Input(Label) should be a 2-D tensor with the 2nd "
+                   "dimensions fixed to 1.");
+    PADDLE_ENFORCE_EQ(
+        emission_dims[0], label_dims[0],
+        "The height of Input(Emission) and the height of Input(Label) "
+        "should be the same.");
 
     ctx->SetOutputDim("Alpha", emission_dims);
+
+    // (TODO caoying) This is tricky. The 1st dimension of Output(LogLikelihood)
+    // is the sequence number in a mini-batch. The dimension set here should be
+    // resized to its correct size in the function Compute.
     ctx->SetOutputDim("LogLikelihood", {emission_dims[0], 1});
   }
 
-  // Explicitly set data type of output of the linear_chain_crf operator
-  // is determined by its input "Emission".
+  // Explicitly set that the data type of output of the linear_chain_crf
+  // operator is determined by its input "Emission".
framework::DataType IndicateDataType(
      const framework::ExecutionContext& ctx) const override {
    return framework::ToDataType(ctx.Input("Emission")->type());
  }
};

+template <typename Place, typename T>
+class LinearChainCrfOpKernel
+    : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+
+    auto* emission_weights = ctx.Input("Emission");
+    auto* transition_weights = ctx.Input("Transition");
+    auto* label = ctx.Input("Label");
+
+    auto in_lod = emission_weights->lod();
+    // TODO(caoying) The checks related to LoD information should be
+    // moved into InferShape once after the InferShape is refactored.
+    PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL,
+                      "The Input(Emission) should be a sequence.");
+    PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL,
+                      "The Input(Label) should be a sequence.");
+    const size_t level = 0;
+
+    auto emission_dims = emission_weights->dims();
+    const size_t seq_num = in_lod[level].size() - 1;
+
+    // TODO(caoying) These local variables seem to be created and destroyed
+    // every time this function is called. Will this bring additional overhead?
+    Tensor emission_exps;
+    Tensor emission_row_max;
+    Tensor transition_exps;
+    emission_exps.mutable_data(emission_dims, platform::CPUPlace());
+    emission_row_max.mutable_data(
+        framework::make_ddim({emission_dims[0], 1}), platform::CPUPlace());
+    transition_exps.mutable_data(transition_weights->dims(),
+                                 platform::CPUPlace());
+
+    auto* alpha = ctx.Output("Alpha");
+    alpha->mutable_data(ctx.GetPlace());
+    auto* ll = ctx.Output("LogLikelihood");
+    // resize the output tensor to the correct dimension.
+    ll->Resize({static_cast<int>(seq_num), 1});
+    T* log_likelihood = ll->mutable_data(ctx.GetPlace());
+
+    for (size_t i = 0; i < seq_num; ++i) {
+      int start_pos = static_cast<int>(in_lod[level][i]);
+      int end_pos = static_cast<int>(in_lod[level][i + 1]);
+
+      const Tensor one_seq = emission_weights->Slice(start_pos, end_pos);
+      Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos);
+      Tensor one_seq_exps = emission_exps.Slice(start_pos, end_pos);
+      const Tensor one_seq_label = label->Slice(start_pos, end_pos);
+      Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos);
+
+      log_likelihood[i] = ForwardOneSequence(
+          ctx.device_context(), one_seq, one_seq_row_max, one_seq_exps,
+          (*transition_weights), transition_exps, one_seq_label, one_seq_alpha);
+    }
+  }
+
+ protected:
+  T ForwardOneSequence(const platform::DeviceContext& ctx,
+                       const Tensor& emission, Tensor& emission_row_max,
+                       Tensor& emission_exps, const Tensor& trans_weights,
+                       Tensor& trans_weight_exps, const Tensor& label,
+                       Tensor& alpha) const {
+    // (TODO caoying) Evaluate and optimize this.
+    // The Eigen computation kernel will be invoked multiple times.
+    // Some computations regardless of sequence information could be performed
+    // only one time for the entire batch. This potentially could be optimized.
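+    // A sketch of the recursion computed below (alpha is rescaled row by
+    // row, and the discarded log factors are accumulated into ll):
+    //   alpha[0, i] = exp(a_i) * exp(x[0, i])
+    //   alpha[k, i] = exp(x[k, i]) * sum_j alpha[k - 1, j] * exp(w_{j, i})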
+
+    auto x_dims = emission.dims();
+    const size_t seq_length = x_dims[0];
+    const size_t tag_num = x_dims[1];
+
+    T* alpha_value = alpha.data();
+
+    auto x = EigenMatrix::From(emission);
+    auto x_row_max = EigenMatrix::From(emission_row_max);
+    const int class_dim = 1;
+    x_row_max.device(*ctx.GetEigenDevice()) =
+        x.maximum(Eigen::DSizes(class_dim))
+            .reshape(Eigen::DSizes(int(seq_length), 1));
+
+    auto x_exps = EigenMatrix::From(emission_exps);
+    x_exps.device(*ctx.GetEigenDevice()) =
+        (x - x_row_max.broadcast(Eigen::DSizes(1, tag_num))).exp();
+
+    auto w = EigenMatrix::From(trans_weights);
+    auto w_exps = EigenMatrix::From(trans_weight_exps);
+    w_exps.device(*ctx.GetEigenDevice()) = w.exp();
+    // The 1st row of w are transition weights for the start mark.
+    const size_t start_ridx = 0;
+    // The 2nd row of w are transition weights for the end mark.
+    const size_t end_ridx = 1;
+    // Transition weights among other tags begin from the 3rd row of w.
+    const size_t state_base_ridx = 2;
+
+    for (size_t i = 0; i < tag_num; ++i) {
+      alpha_value[i] = w_exps(start_ridx, i) * x_exps(0, i);
+    }
+    T ll = -x_row_max(0, 1) - std::log(NormalizeL1(alpha_value, tag_num));
+
+    for (size_t k = 1; k < seq_length; ++k) {
+      for (size_t i = 0; i < tag_num; ++i) {
+        T sum = 0.;
+        for (size_t j = 0; j < tag_num; ++j) {
+          sum += alpha_value[(k - 1) * tag_num + j] *
+                 w_exps(j + state_base_ridx, i);
+        }
+        alpha_value[k * tag_num + i] = x_exps(k, i) * sum;
+      }
+      ll -= x_row_max(k, 1) +
+            std::log(NormalizeL1(alpha_value + k * tag_num, tag_num));
+    }
+    T sum = 0.;
+    for (size_t i = 0; i < tag_num; ++i) {
+      sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps(end_ridx, i);
+    }
+    ll -= std::log(sum);
+
+    const int* lbl = label.data();
+    PADDLE_ENFORCE_LT(
+        *std::max_element(lbl, lbl + seq_length), tag_num,
+        "An invalid tag label that exceeds the largest tag number.");
+
+    // Calculate the nominator part, which depends on the label sequence.
+    ll += w(start_ridx, lbl[0]) + x(start_ridx, lbl[0]) +
+          w(end_ridx, lbl[seq_length - 1]);
+    for (size_t k = 1; k < seq_length; ++k)
+      ll += x(k, lbl[k]) + w(lbl[k - 1], lbl[k]);
+    return -ll;
+  }
+
+ private:
+  T NormalizeL1(T* x, size_t len) const {
+    T sum = 0.;
+    for (size_t i = 0; i < len; ++i) sum += x[i];
+    // (This comment is from the old LinearChainCRFLayer.)
+    // Right now, we just bet that sum won't be zero. If this really happens, we
+    // will figure out what should be done then.
+    PADDLE_ENFORCE(sum,
+                   "The unnormalized probabilities of all possible unfinished "
+                   "sequences must be greater than 0.");
+    for (size_t i = 0; i < len; ++i) x[i] /= sum;
+    return sum;
+  }
+};
+
 class LinearChainCrfGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
-  void InferShape(framework::InferShapeContext* ctx) const override {}
+  void InferShape(framework::InferShapeContext* ctx) const override {}
 };
 
+template <typename Place, typename T>
+class LinearChainCrfGradOpKernel
+    : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "This kernel only runs on CPU.");
+  }
+};
+
 } // namespace operators
 } // namespace paddle
 
 namespace ops = paddle::operators;
 REGISTER_OP(linear_chain_crf, ops::LinearChainCrfOp, ops::LinearChainCrfOpMaker,
             linear_chain_crf_grad, ops::LinearChainCrfGradOp);
-REGISTER_OP_CPU_KERNEL(linear_chain_crf, ops::LinearChainCrfOpKernel);
-REGISTER_OP_CPU_KERNEL(linear_chain_crf_grad,
-                       ops::LinearChainCrfGradOpKernel);
+REGISTER_OP_CPU_KERNEL(
+    linear_chain_crf,
+    ops::LinearChainCrfOpKernel);
+REGISTER_OP_CPU_KERNEL(
+    linear_chain_crf_grad,
+    ops::LinearChainCrfGradOpKernel);
diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h
index ddea39b0c7..a656e233c2 100644
--- a/paddle/operators/linear_chain_crf_op.h
+++ b/paddle/operators/linear_chain_crf_op.h
@@ -19,27 +19,31 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using Tensor = framework::Tensor;
+using framework::Tensor;
 
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
 
-template <typename T>
+template <typename Place, typename T>
 class LinearChainCrfOpKernel : public framework::OpKernel {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "This kernel only runs on CPU.");
-  }
+  void Compute(const framework::ExecutionContext& ctx) const override;
+
+ protected:
+  T ForwardOneSequence(const platform::DeviceContext& ctx,
+                       const Tensor& emission, Tensor& emission_row_max,
+                       Tensor& emission_exps, const Tensor& trans_weights,
+                       Tensor& trans_weight_exps, const Tensor& label,
+                       Tensor& a) const;
+
+ private:
+  T NormalizeL1(T* x, size_t len) const;
 };
 
-template <typename T>
+template <typename Place, typename T>
 class LinearChainCrfGradOpKernel : public framework::OpKernel {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "This kernel only runs on CPU.");
-  }
+  void Compute(const framework::ExecutionContext& ctx) const override;
 };
 
 } // namespace operators
diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/operators/softmax_with_cross_entropy_op.cc
index e639f3a468..98a1c70f11 100644
--- a/paddle/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/operators/softmax_with_cross_entropy_op.cc
@@ -60,19 +60,23 @@ Because this operators performs a softmax on logits internally, it expects
 unscaled logits. Please do not call this op with the output of softmax operator,
 which will produce incorrect results.
 
-This operators expects mutually exclusive hard labels, each sample in a batch
-is in exactly one class with probabilities 1. Each sample in the batch with one
-and only one label.
+When the attribute softLabel is set to false, this operator expects mutually
+exclusive hard labels, each sample in a batch is in exactly one class with
+probabilities 1.
Each sample in the batch has one and only one label.
 
 Equation:
 
 1) hard label (one-hot label)
 
-Loss_j = -\text{Logit}_{Label_j} + \log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right), j = 1, ..., K
+Loss_j = \f$ -\text{Logit}_{Label_j} +
+\log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right),
+j = 1, ..., K \f$
 
 2) soft label (a distribution over all classes)
 
-Loss_j = -\sum_{i=0}^{K}\text{Label}_i\left(\text{Logit}_i-\log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right)\right), j = 1,...,K
+Loss_j = \f$ -\sum_{i=0}^{K}\text{Label}_i\left(\text{Logit}_i -
+\log\left(\sum_{i=0}^{K}\exp(\text{Logit}_i)\right)\right),
+j = 1,...,K \f$
 
 )DOC");
   }
diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
index b16c4d40b9..413210e75b 100644
--- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
+++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
@@ -61,13 +61,13 @@ class LinearChainCrfForward(object):
             s += alpha[-1, i] * self.b_exps[i]
         log_likelihood -= np.log(s)
 
-        # calculate the noninator part.
+        # calculate the nominator part.
         log_likelihood += (
             self.a[label[0]] + self.x[0, label[0]] + self.b[label[-1]])
         for k in range(1, seq_len):
             log_likelihood += (
                 self.x[k, label[k]] + self.w[label[k - 1], label[k]])
-        return log_likelihood
+        return -log_likelihood
 
     def crf_forward_compute(self):
         for i in range(self.seq_num):
@@ -102,7 +102,7 @@ class TestLinearChainCrfOp(OpTest):
         self.inputs = {
             "Emission": (emission, lod),
             "Transition": transition,
-            "label": (labels, lod)
+            "Label": (labels, lod)
         }
 
From 80a5ee005262a7fd8f08ea483d77a9fb9aac3d4d Mon Sep 17 00:00:00 2001
From: caoying03
Date: Tue, 17 Oct 2017 16:16:40 +0800
Subject: [PATCH 05/35] fix forward and add backward.

---
 paddle/operators/linear_chain_crf_op.cc | 334 ++++++++++++++----
 paddle/operators/linear_chain_crf_op.h | 20 +-
 .../tests/test_linear_chain_crf_op.py | 42 ++-
 3 files changed, 302 insertions(+), 94 deletions(-)

diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc
index e127811a10..14ae74ab66 100644
--- a/paddle/operators/linear_chain_crf_op.cc
+++ b/paddle/operators/linear_chain_crf_op.cc
@@ -17,6 +17,22 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
+namespace {
+template <typename T>
+T NormalizeL1(T* x, size_t len) {
+  T sum = 0.;
+  for (size_t i = 0; i < len; ++i) sum += x[i];
+  // (This comment is from the old LinearChainCRFLayer.)
+  // Right now, we just bet that sum won't be zero. If this really happens, we
+  // will figure out what should be done then.
+  PADDLE_ENFORCE(sum,
+                 "The unnormalized probabilities of all possible unfinished "
+                 "sequences must be greater than 0.");
+  for (size_t i = 0; i < len; ++i) x[i] /= sum;
+  return sum;
+}
+} // namespace
+
 using framework::LoDTensor;
 using framework::LoD;
 
 class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -54,13 +70,25 @@ class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker {
         "each tag value \f$v\f$. This vector is called a forward vector and "
         "will also be used in backward computations.")
         .AsIntermediate();
+    AddOutput("EmissionExps",
+              "The exponentials of Input(Emission). This is an intermediate "
+              "computational result in forward computation, and will be reused "
+              "in backward computation.")
+        .AsIntermediate();
+    AddOutput("TransitionExps",
+              "The exponentials of Input(Transition).
This is an intermediate "
+              "computational result in forward computation, and will be reused "
+              "in backward computation.")
+        .AsIntermediate();
     AddOutput(
         "LogLikelihood",
-        "(Tensor, default: Tensor<float>). The logarithm of the conditional "
+        "(Tensor, default: Tensor<float>). The logarithm of the "
+        "conditional "
         "likelihood of each training sample in a mini-batch. This is a 2-D "
         "tensor with shape [S x 1], where S is the sequence number in a "
         "mini-batch. "
-        "Note: S is equal to the sequence number in a mini-batch. The output "
+        "Note: S is equal to the sequence number in a mini-batch. The "
+        "output "
         "is no longer a LoDTensor.");
     AddComment(R"DOC(
 Conditional Random Field defines an undirected probabilistic graph with nodes
@@ -129,6 +157,10 @@ class LinearChainCrfOp : public framework::OperatorWithKernel {
 
     PADDLE_ENFORCE(ctx->HasOutput("Alpha"),
                    "Output(Alpha) should be not null.");
+    PADDLE_ENFORCE(ctx->HasOutput("EmissionExps"),
+                   "Output(EmissionExps) should be not null.");
+    PADDLE_ENFORCE(ctx->HasOutput("TransitionExps"),
+                   "Output(TransitionExps) should be not null.");
     PADDLE_ENFORCE(ctx->HasOutput("LogLikelihood"),
                    "Output(LogLikelihood) should be not null.");
 
@@ -143,7 +175,7 @@
     PADDLE_ENFORCE_EQ(
         transition_dims[0] - 2, transition_dims[1],
         "An invalid dimension for the Input(Transition), which should "
-        "be a 2-D tensor with shape [D + 2 x D].");
+        "be a 2-D tensor with shape [(D + 2) x D].");
     PADDLE_ENFORCE_EQ(
         emission_dims[1], transition_dims[1],
         "The 2nd dimension of the Input(Emission) and the Input(Transition) "
@@ -157,11 +189,14 @@
         "should be the same.");
 
     ctx->SetOutputDim("Alpha", emission_dims);
-
+    ctx->SetOutputDim("EmissionExps", emission_dims);
+    ctx->SetOutputDim("TransitionExps", transition_dims);
     // (TODO caoying) This is tricky. The 1st dimension of Output(LogLikelihood)
     // is the sequence number in a mini-batch. The dimension set here should be
     // resized to its correct size in the function Compute.
     ctx->SetOutputDim("LogLikelihood", {emission_dims[0], 1});
+
+    ctx->ShareLoD("Emission", /*->*/ "EmissionExps");
   }
 
  protected:
@@ -180,9 +215,12 @@ class LinearChainCrfOpKernel
   void Compute(const framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
                    "This kernel only runs on CPU.");
     auto* emission_weights = ctx.Input("Emission");
     auto* transition_weights = ctx.Input("Transition");
+    auto* emission_exps = ctx.Output("EmissionExps");
+    emission_exps->mutable_data(platform::CPUPlace());
+    auto* transition_exps = ctx.Output("TransitionExps");
+    transition_exps->mutable_data(platform::CPUPlace());
     auto* label = ctx.Input("Label");
 
     auto in_lod = emission_weights->lod();
@@ -195,18 +233,29 @@
     const size_t level = 0;
 
     auto emission_dims = emission_weights->dims();
+    const size_t batch_size = emission_dims[0];
+    const size_t tag_num = emission_dims[1];
     const size_t seq_num = in_lod[level].size() - 1;
 
-    // TODO(caoying) These local variables seem to be created and destroyed
-    // every time this function is called. Will this bring additional overhead?
-    Tensor emission_exps;
     Tensor emission_row_max;
-    Tensor transition_exps;
-    emission_exps.mutable_data(emission_dims, platform::CPUPlace());
     emission_row_max.mutable_data(
-        framework::make_ddim({emission_dims[0], 1}), platform::CPUPlace());
-    transition_exps.mutable_data(transition_weights->dims(),
-                                 platform::CPUPlace());
+        framework::make_ddim({static_cast<int>(batch_size), 1}),
+        platform::CPUPlace());
+
+    auto place = ctx.GetEigenDevice();
+    auto x = EigenMatrix::From(*emission_weights);
+    auto x_row_max = EigenMatrix::From(emission_row_max);
+    x_row_max.device(place) =
+        x.maximum(Eigen::DSizes(1))
+            .reshape(Eigen::DSizes(int(batch_size), 1));
+
+    auto x_exps = EigenMatrix::From(*emission_exps);
+    x_exps.device(place) =
+        (x - x_row_max.broadcast(Eigen::DSizes(1, tag_num))).exp();
+
+    auto w = EigenMatrix::From(*transition_weights);
+    auto w_exps = EigenMatrix::From(*transition_exps);
+    w_exps.device(place) = w.exp();
 
     auto* alpha = ctx.Output("Alpha");
     alpha->mutable_data(ctx.GetPlace());
@@ -214,117 +263,124 @@
     // resize the output tensor to the correct dimension.
     ll->Resize({static_cast<int>(seq_num), 1});
     T* log_likelihood = ll->mutable_data(ctx.GetPlace());
-
     for (size_t i = 0; i < seq_num; ++i) {
       int start_pos = static_cast<int>(in_lod[level][i]);
       int end_pos = static_cast<int>(in_lod[level][i + 1]);
 
       const Tensor one_seq = emission_weights->Slice(start_pos, end_pos);
       Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos);
-      Tensor one_seq_exps = emission_exps.Slice(start_pos, end_pos);
+      Tensor one_seq_exps = emission_exps->Slice(start_pos, end_pos);
       const Tensor one_seq_label = label->Slice(start_pos, end_pos);
       Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos);
 
       log_likelihood[i] = ForwardOneSequence(
-          ctx.device_context(), one_seq, one_seq_row_max, one_seq_exps,
-          (*transition_weights), transition_exps, one_seq_label, one_seq_alpha);
+          &one_seq, &one_seq_row_max, &one_seq_exps, transition_weights,
+          transition_exps, &one_seq_label, &one_seq_alpha);
     }
   }
 
  protected:
-  T ForwardOneSequence(const platform::DeviceContext& ctx,
-                       const Tensor& emission, Tensor& emission_row_max,
-                       Tensor& emission_exps, const Tensor& trans_weights,
-                       Tensor& trans_weight_exps, const Tensor& label,
-                       Tensor& alpha) const {
-    // (TODO caoying) Evaluate and optimize this.
-    // The Eigen computation kernel will be invoked multiple times.
-    // Some computations regardless of sequence information could be performed
-    // only one time for the entire batch. This potentially could be optimized.
-
-    auto x_dims = emission.dims();
+  T ForwardOneSequence(const Tensor* emission, const Tensor* emission_row_max,
+                       const Tensor* emission_exps, const Tensor* trans_weights,
+                       const Tensor* trans_weight_exps, const Tensor* label,
+                       Tensor* alpha) const {
+    const T* x = emission->data();
+    const T* x_row_max = emission_row_max->data();
+    const T* x_exps = emission_exps->data();
+    const T* w = trans_weights->data();
+    const T* w_exps = trans_weight_exps->data();
+    T* alpha_value = alpha->data();
+
+    auto x_dims = emission->dims();
     const size_t seq_length = x_dims[0];
     const size_t tag_num = x_dims[1];
-
-    T* alpha_value = alpha.data();
-
-    auto x = EigenMatrix::From(emission);
-    auto x_row_max = EigenMatrix::From(emission_row_max);
-    const int class_dim = 1;
-    x_row_max.device(*ctx.GetEigenDevice()) =
-        x.maximum(Eigen::DSizes(class_dim))
-            .reshape(Eigen::DSizes(int(seq_length), 1));
-
-    auto x_exps = EigenMatrix::From(emission_exps);
-    x_exps.device(*ctx.GetEigenDevice()) =
-        (x - x_row_max.broadcast(Eigen::DSizes(1, tag_num))).exp();
-
-    auto w = EigenMatrix::From(trans_weights);
-    auto w_exps = EigenMatrix::From(trans_weight_exps);
-    w_exps.device(*ctx.GetEigenDevice()) = w.exp();
     // The 1st row of w are transition weights for the start mark.
-    const size_t start_ridx = 0;
     // The 2nd row of w are transition weights for the end mark.
-    const size_t end_ridx = 1;
     // Transition weights among other tags begin from the 3rd row of w.
-    const size_t state_base_ridx = 2;
+    const size_t state_trans_base_idx = 2;
 
     for (size_t i = 0; i < tag_num; ++i) {
-      alpha_value[i] = w_exps(start_ridx, i) * x_exps(0, i);
+      alpha_value[i] = w_exps[i] * x_exps[i];
    }
-    T ll = -x_row_max(0, 1) - std::log(NormalizeL1(alpha_value, tag_num));
+    T ll = -x_row_max[0] - std::log(NormalizeL1(alpha_value, tag_num));
 
     for (size_t k = 1; k < seq_length; ++k) {
       for (size_t i = 0; i < tag_num; ++i) {
         T sum = 0.;
         for (size_t j = 0; j < tag_num; ++j) {
           sum += alpha_value[(k - 1) * tag_num + j] *
-                 w_exps(j + state_base_ridx, i);
+                 w_exps[(j + state_trans_base_idx) * tag_num + i];
         }
-        alpha_value[k * tag_num + i] = x_exps(k, i) * sum;
+        alpha_value[k * tag_num + i] = x_exps[k * tag_num + i] * sum;
       }
-      ll -= x_row_max(k, 1) +
-            std::log(NormalizeL1(alpha_value + k * tag_num, tag_num));
+      ll -= x_row_max[k] +
+            std::log(NormalizeL1(alpha_value + k * tag_num, tag_num));
     }
     T sum = 0.;
     for (size_t i = 0; i < tag_num; ++i) {
-      sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps(end_ridx, i);
+      sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps[tag_num + i];
     }
     ll -= std::log(sum);
 
-    const int* lbl = label.data();
+    const int* lbl = label->data();
     PADDLE_ENFORCE_LT(
         *std::max_element(lbl, lbl + seq_length), tag_num,
         "An invalid tag label that exceeds the largest tag number.");
 
     // Calculate the nominator part, which depends on the label sequence.
-    ll += w(start_ridx, lbl[0]) + x(start_ridx, lbl[0]) +
-          w(end_ridx, lbl[seq_length - 1]);
+    ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] +
+          w[tag_num + lbl[seq_length - 1]] /*end transition*/;
     for (size_t k = 1; k < seq_length; ++k)
-      ll += x(k, lbl[k]) + w(lbl[k - 1], lbl[k]);
+      ll += x[k * tag_num + lbl[k]] + w[lbl[k - 1] * tag_num + lbl[k]];
     return -ll;
   }
-
-  private:
-  T NormalizeL1(T* x, size_t len) const {
-    T sum = 0.;
-    for (size_t i = 0; i < len; ++i) sum += x[i];
-    // (This comment is from the old LinearChainCRFLayer.)
-    // Right now, we just bet that sum won't be zero. If this really happens, we
-    // will figure out what should be done then.
-    PADDLE_ENFORCE(sum,
-                   "The unnormalized probabilities of all possible unfinished "
-                   "sequences must be greater than 0.");
-    for (size_t i = 0; i < len; ++i) x[i] /= sum;
-    return sum;
-  }
-};
 
 class LinearChainCrfGradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
-  void InferShape(framework::InferShapeContext* ctx) const override {}
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("EmissionExps"),
+                   "Input(EmissionExps) should be not null.");
+    PADDLE_ENFORCE(ctx->HasInput("TransitionExps"),
+                   "Input(TransitionExps) should be not null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("LogLikelihood")),
+                   "Input(LogLikelihood@GRAD) should be not null.");
+
+    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Emission")),
+                   "Output(Emission@GRAD) should be not null.");
+    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Transition")),
+                   "Output(Transition@GRAD) should be not null.");
+
+    auto emission_exps_dims = ctx->GetInputDim("EmissionExps");
+    auto transition_exps_dims =
+        ctx->GetInputDim(framework::GradVarName("TransitionExps"));
+    auto label_dims = ctx->GetInputDim("Label");
+
+    PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2UL,
+                      "The Input(EmissionExps) should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2UL,
+                      "The Input(TransitionExps) should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(
+        transition_exps_dims[0] - 2, transition_exps_dims[1],
+        "An invalid dimension for the Input(TransitionExps), which should "
+        "be a 2-D tensor with shape [(D + 2) x D].");
+    PADDLE_ENFORCE_EQ(
+        emission_exps_dims[1], transition_exps_dims[1],
+        "The 2nd dimension of the Input(EmissionExps) and the "
+        "Input(TransitionExps) should be equal to the tag number.");
+    PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL,
+                   "The Input(Label) should be a 2-D tensor with the 2nd "
+                   "dimensions fixed to 1.");
+    PADDLE_ENFORCE_EQ(
+        emission_exps_dims[0], label_dims[0],
+        "The height of Input(EmissionExps) and the height of Input(Label) "
+        "should be the same.");
+
+    ctx->SetOutputDim(framework::GradVarName("Emission"), emission_exps_dims);
+    ctx->SetOutputDim(framework::GradVarName("Transition"),
+                      transition_exps_dims);
+  }
 };
 
 template <typename Place, typename T>
@@ -334,6 +390,134 @@ class LinearChainCrfGradOpKernel
   void Compute(const framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
                    "This kernel only runs on CPU.");
+    auto* ll_grad =
+        ctx.Input(framework::GradVarName("LogLikelihood"));
+    auto* label = ctx.Input("Label");
+    auto* emission_exps = ctx.Input("EmissionExps");
+    auto* transition_exps = ctx.Input("TransitionExps");
+    auto* alpha = ctx.Input("Alpha");
+
+    auto* emission_grad =
+        ctx.Output(framework::GradVarName("Emission"));
+    emission_grad->mutable_data(platform::CPUPlace());
+
+    auto* trans_grad = ctx.Output(framework::GradVarName("Transition"));
+    if (trans_grad) trans_grad->mutable_data(platform::CPUPlace());
+
+    auto emission_dims = emission_exps->dims();
+
+    // Beta is the memo table used in dynamic programming to calculate the
+    // backward vectors. For a backward vector i (the i-th row of beta), it
+    // captures the unnormalized probabilities of partial sequences starting at
+    // position i.
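+    // A sketch of the intended recursion, mirroring the rescaled forward pass
+    // (same notation as in the class comment):
+    //   beta[L - 1, i] = exp(b_i)
+    //   beta[k, i]     = sum_j exp(w_{i, j} + x[k + 1, j]) * beta[k + 1, j]
+    // Each row of beta is L1-normalized to avoid numerical underflow.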
+    Tensor beta;
+    beta.mutable_data(emission_dims, platform::CPUPlace());
+
+    auto place = ctx.GetEigenDevice();
+    auto x_grad = EigenMatrix::From(*emission_grad);
+    auto out_grad = EigenMatrix::From(*ll_grad);
+    x_grad.device(place) =
+        x_grad * out_grad.broadcast(Eigen::DSizes(1, emission_dims[1]));
+
+    const size_t level = 0; // currently, only support sequence.
+    auto lod = emission_exps->lod();
+    for (size_t i = 0; i < lod[level].size() - 1; ++i) {
+      int start_pos = static_cast<int>(lod[level][i]);
+      int end_pos = static_cast<int>(lod[level][i + 1]);
+
+      const Tensor one_seq_emission_exps =
+          emission_exps->Slice(start_pos, end_pos);
+      const Tensor one_seq_label = label->Slice(start_pos, end_pos);
+      const Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos);
+      Tensor one_seq_beta = beta.Slice(start_pos, end_pos);
+      Tensor one_seq_emission_grad =
+          emission_grad->Slice(start_pos, end_pos);
+
+      BackwardOneSequence(ctx.device_context(), &one_seq_emission_exps,
+                          transition_exps, &one_seq_alpha, &one_seq_label,
+                          &one_seq_beta, trans_grad, &one_seq_emission_grad);
+    }
+  }
+
+ protected:
+  void BackwardOneSequence(const platform::DeviceContext& ctx,
+                           const Tensor* emission_exps,
+                           const Tensor* transition_exps, const Tensor* alpha,
+                           const Tensor* label, Tensor* beta,
+                           Tensor* transition_grad,
+                           Tensor* emission_grad) const {
+    const T* w_exps = transition_exps->data();
+    const T* x_exps = emission_exps->data();
+    const int* label_value = label->data();
+    T* beta_value = beta->data();
+
+    auto x_dims = emission_exps->dims();
+    const size_t seq_length = x_dims[0];
+    const size_t tag_num = x_dims[1];
+    const size_t state_trans_base_idx = 2;
+
+    // Calculate the backward vectors beta.
+    for (int i = 0; i < tag_num; ++i)
+      beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i];
+    NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num);
+
+    for (int k = seq_length - 2; k >= 0; --k) {
+      for (int i = 0; i < tag_num; ++i) {
+        T sum = 0.;
+        for (int j = 0; j < tag_num; ++j) {
+          sum += x_exps[(i + state_trans_base_idx) * tag_num + j] *
+                 beta_value[(k + 1) * tag_num + j] *
+                 x_exps[(k + 1) * tag_num + j];
+        }
+        beta_value[k * tag_num + i] = sum;
+      }
+      NormalizeL1(beta_value + k * tag_num, tag_num);
+    }
+
+    auto alpha_mat = EigenMatrix::From(*alpha);
+    auto beta_mat = EigenMatrix::From(*beta);
+    auto x_grad_mat = EigenMatrix::From(*emission_grad);
+
+    auto* place = ctx.GetEigenDevice();
+    x_grad_mat.device(*place) = alpha_mat * beta_mat;
+    x_grad_mat /= x_grad_mat.sum(Eigen::DSizes(1))
+                      .reshape(Eigen::DSizes(seq_length, 1))
+                      .broadcast(Eigen::DSizes(1, tag_num));
+
+    for (int k = 0; k < seq_length; ++k)
+      x_grad_mat(k, label_value[k]) -= static_cast<T>(1);
+
+    if (transition_grad) {
+      T* trans_grad = transition_grad->data();
+      for (size_t k = 0; k < tag_num; ++k) {
+        trans_grad[k] += x_grad_mat(/*from start state*/ 0, k);
+        trans_grad[tag_num + k] +=
+            x_grad_mat(/*to end state*/ seq_length - 1, k);
+      }
+
+      auto x_exps_mat = EigenMatrix::From(*emission_exps);
+      beta_mat = beta_mat * x_exps_mat;
+      beta_mat /= beta_mat.sum(Eigen::DSizes(1))
+                      .reshape(Eigen::DSizes(seq_length, 1))
+                      .broadcast(Eigen::DSizes(1, tag_num));
+
+      for (int k = 1; k < seq_length; ++k) {
+        T sum = 0.;
+        for (int i = 0; i < tag_num; ++i) {
+          for (int j = 0; j < tag_num; ++j)
+            sum += x_exps_mat(i, j) * alpha_mat(k - 1, i) * beta_mat(k, j);
        }
        sum = static_cast<T>(1) / sum;
        for (int i = 0; i < tag_num; ++i) {
          for (int j = 0; j < tag_num; ++j) {
            trans_grad[(i + 2) * tag_num + j] +=
                sum * x_exps_mat(i, j) *
alpha_mat(k - 1, i) * beta_mat(k, j);
+          }
+        }
+        trans_grad[label_value[k - 1] * tag_num + label_value[k]] -=
+            static_cast<T>(1);
+      }
+    }
  }
};
diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h
index a656e233c2..e9852de595 100644
--- a/paddle/operators/linear_chain_crf_op.h
+++ b/paddle/operators/linear_chain_crf_op.h
@@ -30,20 +30,24 @@ class LinearChainCrfOpKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& ctx) const override;
 
  protected:
-  T ForwardOneSequence(const platform::DeviceContext& ctx,
-                       const Tensor& emission, Tensor& emission_row_max,
-                       Tensor& emission_exps, const Tensor& trans_weights,
-                       Tensor& trans_weight_exps, const Tensor& label,
-                       Tensor& a) const;
-
- private:
-  T NormalizeL1(T* x, size_t len) const;
+  T ForwardOneSequence(const Tensor* emission, const Tensor* emission_row_max,
+                       const Tensor* emission_exps, const Tensor* trans_weights,
+                       const Tensor* trans_weight_exps, const Tensor* label,
+                       Tensor* alpha) const;
 };
 
 template <typename Place, typename T>
 class LinearChainCrfGradOpKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override;
+
+ protected:
+  void BackwardOneSequence(const platform::DeviceContext& ctx,
+                           const Tensor* emission_exps,
+                           const Tensor* transition_exps, const Tensor* alpha,
+                           const Tensor* label, Tensor* beta,
+                           Tensor* transition_grad,
+                           Tensor* emission_grad) const;
 };
 
 } // namespace operators
diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
index 413210e75b..9b73e26eb9 100644
--- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
+++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py
@@ -4,10 +4,12 @@ import numpy as np
 
 from op_test import OpTest
 
+import pdb
+
 
 class LinearChainCrfForward(object):
-    def __init__(self, seq_start_positions, emission_weights,
-                 transition_weights, labels):
+    def __init__(self, seq_start_positions, emission_weights, emission_row_max,
+                 emission_exps, transition_weights, transition_exps, labels):
         self.tag_num = emission_weights.shape[1]
         self.seq_num = len(seq_start_positions) - 1
 
@@ -15,25 +17,25 @@ class LinearChainCrfForward(object):
         self.labels = labels
         self.x = emission_weights
 
-        self.x_row_max = np.amax(self.x, axis=1, keepdims=True)
-        self.x_exps = np.exp(self.x - self.x_row_max)
+        self.x_row_max = emission_row_max
+        self.x_exps = emission_exps
 
         # unnormalized logits of the transition weights for the start mark.
         self.a = transition_weights[0, :]
-        self.a_exps = np.exp(self.a)
+        self.a_exps = transition_exps[0, :]
         # unnormalized logits of the transition weights for the end mark.
         self.b = transition_weights[1, :]
-        self.b_exps = np.exp(self.b)
+        self.b_exps = transition_exps[1, :]
         # unnormalized logits of the transition weights for all the other tags.
         self.w = transition_weights[2:, :]
-        self.w_exps = np.exp(self.w)
+        self.w_exps = transition_exps[2:, :]
 
         # The output of linear chain crf operator.
         # alpha is a memo table in dynamic programming to calculate
         # normalization factor.
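+        # Each row of alpha is L1-normalized as it is computed; the log of
+        # the normalizing constants is folded into the log-likelihood.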
self.alpha = np.zeros( (seq_start_positions[-1], self.tag_num), dtype="float32") - self.log_likelihood = np.zeros((self.tag_num, 1)) + self.log_likelihood = np.zeros((self.seq_num, 1)) def _l1_norm(self, x): s = np.sum(x) @@ -91,11 +93,15 @@ class TestLinearChainCrfOp(OpTest): lod = [[0]] for i in range(SEQ_NUM): lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) - emission = np.random.uniform(-1, 1, [lod[-1][-1], TAG_NUM]).astype("float32") + emission_row_max = np.amax(emission, axis=1, keepdims=True) + emission_exps = np.exp(emission - emission_row_max) + transition = np.random.uniform(-0.5, 0.5, [TAG_NUM + 2, TAG_NUM]).astype("float32") + transition_exps = np.exp(transition) + labels = np.random.randint( low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int32") @@ -105,10 +111,17 @@ class TestLinearChainCrfOp(OpTest): "Label": (labels, lod) } - crf = LinearChainCrfForward(lod[0], emission, transition, labels) + crf = LinearChainCrfForward(lod[0], emission, emission_row_max, + emission_exps, transition, transition_exps, + labels) alpha, log_likelihood = crf.crf_forward_compute() - self.outputs = {"Alpha": alpha, "LogLikelihood": log_likelihood} + self.outputs = { + "Alpha": alpha, + "EmissionExps": emission_exps, + "TransitionExps": transition_exps, + "LogLikelihood": log_likelihood + } def setUp(self): self.op_type = "linear_chain_crf" @@ -117,6 +130,13 @@ class TestLinearChainCrfOp(OpTest): def test_check_output(self): self.check_output() + def test_check_grad(self): + self.check_grad(["Emission", "Transition"], "LogLikelihood") + + def test_check_grad_ignore_transition(self): + self.check_grad( + ["Emission"], "LogLikelihood", no_grad_set=set("Transition")) + if __name__ == "__main__": unittest.main() From 427644b2fa01e6a44b6d3bc0b4d2fcc8ba8b6265 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Mon, 23 Oct 2017 10:07:12 +0800 Subject: [PATCH 06/35] fix the computation kernels. 
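For reference, the forward kernel fixed here implements the scaled forward recursion of a linear chain CRF. Below is a minimal NumPy sketch of that recursion; the function and argument names are illustrative only, and the row-max shift the kernel applies before exponentiation for numerical stability is omitted.

    import numpy as np

    def crf_forward(x_exps, a_exps, b_exps, w_exps):
        # x_exps: (seq_len, tag_num) exponentiated emission scores.
        # a_exps, b_exps: (tag_num,) exponentiated start/end transition scores.
        # w_exps: (tag_num, tag_num) exponentiated tag-to-tag transition scores.
        seq_len, tag_num = x_exps.shape
        alpha = np.zeros((seq_len, tag_num))
        alpha[0] = a_exps * x_exps[0]
        s = alpha[0].sum()
        alpha[0] /= s  # per-row L1 normalization, as NormalizeL1 does
        log_z = np.log(s)
        for k in range(1, seq_len):
            alpha[k] = alpha[k - 1].dot(w_exps) * x_exps[k]
            s = alpha[k].sum()
            alpha[k] /= s
            log_z += np.log(s)
        log_z += np.log((alpha[-1] * b_exps).sum())
        return alpha, log_z

The per-step normalization keeps alpha in a safe numeric range; the log normalizer is recovered by accumulating the logs of the discarded row sums, and the negative log likelihood is log Z minus the score of the labeled path.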
--- paddle/framework/operator.h | 2 +- paddle/operators/linear_chain_crf_op.cc | 122 +++++++++++------- paddle/operators/linear_chain_crf_op.h | 2 +- .../tests/test_linear_chain_crf_op.py | 15 +-- 4 files changed, 84 insertions(+), 57 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 0d0304ac9e..e9cf2f97e0 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -659,7 +659,7 @@ class OperatorWithKernel : public OperatorBase { if (t != nullptr) { int tmp = static_cast(ToDataType(t->type())); PADDLE_ENFORCE(tmp == data_type || data_type == -1, - "DataType of Paddle Op must be same."); + "DataType of Paddle Op must be the same."); data_type = tmp; } } diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 268b1c41db..12034d7d6e 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -165,11 +165,11 @@ class LinearChainCrfOp : public framework::OperatorWithKernel { "Output(LogLikelihood) should be not null."); auto emission_dims = ctx->GetInputDim("Emission"); - auto transition_dims = ctx->GetInputDim("Transition"); - auto label_dims = ctx->GetInputDim("Label"); - PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL, "The Input(Emission) should be a 2-D tensor."); + PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed."); + + auto transition_dims = ctx->GetInputDim("Transition"); PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL, "The Input(Transition) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( @@ -180,6 +180,8 @@ class LinearChainCrfOp : public framework::OperatorWithKernel { emission_dims[1], transition_dims[1], "The 2nd dimension of the Input(Emission) and the Input(Transition) " "should be equal to the tag number."); + + auto label_dims = ctx->GetInputDim("Label"); PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL, "The Input(Label) should be a 2-D tensor with the 2nd " "dimensions fixed to 1."); @@ -204,7 +206,7 @@ class LinearChainCrfOp : public framework::OperatorWithKernel { // operator is determined by its input "Emission". framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("Emission")->type()); + return framework::ToDataType(ctx.Input("Emission")->type()); } }; @@ -224,6 +226,8 @@ class LinearChainCrfOpKernel auto* label = ctx.Input("Label"); auto in_lod = emission_weights->lod(); + PADDLE_ENFORCE(in_lod.size(), "Input(Emission) is not a sequence."); + // TODO(caoying) The checks related to LoD information should be // moved into InferShape once after the InferShape is refactored. PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL, @@ -266,12 +270,17 @@ class LinearChainCrfOpKernel for (size_t i = 0; i < seq_num; ++i) { int start_pos = static_cast(in_lod[level][i]); int end_pos = static_cast(in_lod[level][i + 1]); + if (end_pos == start_pos) { + // If an empty input sequence is given, pad 0 for its cost. 
+ log_likelihood[i] = static_cast(0.); + continue; + } - const Tensor one_seq = emission_weights->Slice(start_pos, end_pos); - Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos); - Tensor one_seq_exps = emission_exps->Slice(start_pos, end_pos); - const Tensor one_seq_label = label->Slice(start_pos, end_pos); - Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); + const Tensor one_seq = emission_weights->Slice(start_pos, end_pos); + Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos); + Tensor one_seq_exps = emission_exps->Slice(start_pos, end_pos); + const Tensor one_seq_label = label->Slice(start_pos, end_pos); + Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); log_likelihood[i] = ForwardOneSequence( &one_seq, &one_seq_row_max, &one_seq_exps, transition_weights, @@ -306,7 +315,7 @@ class LinearChainCrfOpKernel for (size_t k = 1; k < seq_length; ++k) { for (size_t i = 0; i < tag_num; ++i) { - T sum = 0.; + T sum = static_cast(0.); for (size_t j = 0; j < tag_num; ++j) { sum += alpha_value[(k - 1) * tag_num + j] * w_exps[(j + state_trans_base_idx) * tag_num + i]; @@ -326,11 +335,14 @@ class LinearChainCrfOpKernel PADDLE_ENFORCE_LT( *std::max_element(lbl, lbl + seq_length), tag_num, "An invalid tag label that execesses the largest tag number."); + // Calculate the nominator part, which depends on the label sequence. ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] + w[tag_num + lbl[seq_length - 1]] /*end transition*/; - for (size_t k = 1; k < seq_length; ++k) - ll += x[k * tag_num + lbl[k]] + w[lbl[k - 1] * tag_num + lbl[k]]; + for (size_t k = 1; k < seq_length; ++k) { + ll += x[k * tag_num + lbl[k]] + + w[(lbl[k - 1] + state_trans_base_idx) * tag_num + lbl[k]]; + } return -ll; } }; @@ -353,12 +365,13 @@ class LinearChainCrfGradOp : public framework::OperatorWithKernel { "Output(Transition@GRAD) should be not null."); auto emission_exps_dims = ctx->GetInputDim("EmissionExps"); - auto transition_exps_dims = - ctx->GetInputDim(framework::GradVarName("TransitionExps")); - auto label_dims = ctx->GetInputDim("Label"); - PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2UL, "The Input(EmissionExps) should be a 2-D tensor."); + PADDLE_ENFORCE(emission_exps_dims[0], + "An empty mini-batch is not allowed."); + + auto transition_exps_dims = + ctx->GetInputDim(framework::GradVarName("TransitionExps")); PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2UL, "The Input(TransitionExps) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( @@ -369,6 +382,8 @@ class LinearChainCrfGradOp : public framework::OperatorWithKernel { emission_exps_dims[1], transition_exps_dims[1], "The 2nd dimension of the Input(EmissionExps) and the " "Input(TransitionExps) should be equal to the tag number."); + + auto label_dims = ctx->GetInputDim("Label"); PADDLE_ENFORCE(label_dims.size() == 2UL && label_dims[1] == 1UL, "The Input(Label) should be a 2-D tensor with the 2nd " "dimensions fixed to 1."); @@ -381,6 +396,14 @@ class LinearChainCrfGradOp : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("Transition"), transition_exps_dims); } + + protected: + // Explicitly set that the data type of output of the linear_chain_crf_grad + // operator is determined by its input "EmissionExps". 
+ framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType(ctx.Input("EmissionExps")->type()); + } }; template @@ -390,12 +413,12 @@ class LinearChainCrfGradOpKernel void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), "This kernel only runs on CPU."); - auto* ll_grad = - ctx.Input(framework::GradVarName("LogLikelihood")); auto* label = ctx.Input("Label"); auto* emission_exps = ctx.Input("EmissionExps"); auto* transition_exps = ctx.Input("TransitionExps"); - auto* alpha = ctx.Input("Alpha"); + auto* alpha = ctx.Input("Alpha"); + const T* ll_grad = + ctx.Input(framework::GradVarName("LogLikelihood"))->data(); auto* emission_grad = ctx.Output(framework::GradVarName("Emission")); @@ -413,34 +436,31 @@ class LinearChainCrfGradOpKernel Tensor beta; beta.mutable_data(emission_dims, platform::CPUPlace()); - auto place = ctx.GetEigenDevice(); - auto x_grad = EigenMatrix::From(*emission_grad); - auto out_grad = EigenMatrix::From(*ll_grad); - x_grad.device(place) = - x_grad * out_grad.broadcast(Eigen::DSizes(1, emission_dims[1])); - const size_t level = 0; // currently, only support sequence. - auto lod = emission_exps->lod(); + auto lod = label->lod(); + PADDLE_ENFORCE(lod.size(), "Input(Label) is not a sequence."); + for (size_t i = 0; i < lod[level].size() - 1; ++i) { int start_pos = static_cast(lod[level][i]); int end_pos = static_cast(lod[level][i + 1]); + if (end_pos == start_pos) continue; const Tensor one_seq_emission_exps = - emission_exps->Slice(start_pos, end_pos); - const Tensor one_seq_label = label->Slice(start_pos, end_pos); - const Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); - Tensor one_seq_beta = beta.Slice(start_pos, end_pos); - Tensor one_seq_emission_grad = - emission_grad->Slice(start_pos, end_pos); - - BackwardOneSequence(ctx.device_context(), &one_seq_emission_exps, - transition_exps, &one_seq_alpha, &one_seq_label, - &one_seq_beta, trans_grad, &one_seq_emission_grad); + emission_exps->Slice(start_pos, end_pos); + const Tensor one_seq_label = label->Slice(start_pos, end_pos); + const Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); + Tensor one_seq_beta = beta.Slice(start_pos, end_pos); + Tensor one_seq_emission_grad = emission_grad->Slice(start_pos, end_pos); + + BackwardOneSequence(ctx.device_context(), ll_grad[i], + &one_seq_emission_exps, transition_exps, + &one_seq_alpha, &one_seq_label, &one_seq_beta, + trans_grad, &one_seq_emission_grad); } } protected: - void BackwardOneSequence(const platform::DeviceContext& ctx, + void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, const Tensor* emission_exps, const Tensor* transition_exps, const Tensor* alpha, const Tensor* label, Tensor* beta, @@ -457,12 +477,15 @@ class LinearChainCrfGradOpKernel const size_t state_trans_base_idx = 2; // Calculate the backwark vectors beta. - for (int i = 0; i < tag_num; ++i) + // First, calculate the initialition state. 
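+ // At the last position, beta only receives mass from the transition to the
+ // end mark, i.e. beta[seq_length - 1][i] = exp(w_end[i]); it is then
+ // L1-normalized, mirroring the scaling applied to alpha in the forward pass.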
+ for (int i = 0; i < tag_num; ++i) { beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i]; + } NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num); + for (int k = seq_length - 2; k >= 0; --k) { for (int i = 0; i < tag_num; ++i) { - T sum = 0.; + T sum = static_cast(0.); for (int j = 0; j < tag_num; ++j) { sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * x_exps[(k + 1) * tag_num + j] * @@ -476,6 +499,7 @@ class LinearChainCrfGradOpKernel auto alpha_mat = EigenMatrix::From(*alpha); auto beta_mat = EigenMatrix::From(*beta); auto x_grad_mat = EigenMatrix::From(*emission_grad); + x_grad_mat.setConstant(ll_grad); auto* place = ctx.GetEigenDevice(); x_grad_mat.device(*place) = alpha_mat * beta_mat; @@ -483,8 +507,9 @@ class LinearChainCrfGradOpKernel .reshape(Eigen::DSizes(seq_length, 1)) .broadcast(Eigen::DSizes(1, tag_num)); - for (int k = 0; k < seq_length; ++k) + for (int k = 0; k < seq_length; ++k) { x_grad_mat(k, label_value[k]) -= static_cast(1); + } if (transition_grad) { T* trans_grad = transition_grad->data(); @@ -501,20 +526,23 @@ class LinearChainCrfGradOpKernel .broadcast(Eigen::DSizes(1, tag_num)); for (int k = 1; k < seq_length; ++k) { - T sum = 0.; + T sum = static_cast(0.); for (int i = 0; i < tag_num; ++i) { - for (int j = 0; j < tag_num; ++j) - sum += x_exps_mat(i, j) * alpha_mat(k - 1, i) * beta_mat(k, j); + for (int j = 0; j < tag_num; ++j) { + sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * + alpha_mat(k - 1, i) * beta_mat(k, j); + } } - sum = static_cast(1) / sum; + sum = static_cast(1.) / sum; for (int i = 0; i < tag_num; ++i) { for (int j = 0; j < tag_num; ++j) { - trans_grad[(i + 2) * tag_num + j] += - sum * x_exps_mat(i, j) * alpha_mat(k - 1, i) * beta_mat(k, j); + trans_grad[(i + state_trans_base_idx) * tag_num + j] += + sum * w_exps[(i + state_trans_base_idx) * tag_num + j] * + alpha_mat(k - 1, i) * beta_mat(k, j); } } trans_grad[label_value[k - 1] * tag_num + label_value[k]] -= - static_cast(1); + static_cast(1.); } } } diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index e9852de595..f65d268bb6 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -42,7 +42,7 @@ class LinearChainCrfGradOpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override; protected: - void BackwardOneSequence(const platform::DeviceContext& ctx, + void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, const Tensor* emission_exps, const Tensor* transition_exps, const Tensor* alpha, const Tensor* label, Tensor* beta, diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py index 9b73e26eb9..0f169ada95 100644 --- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py +++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py @@ -4,8 +4,6 @@ import numpy as np from op_test import OpTest -import pdb - class LinearChainCrfForward(object): def __init__(self, seq_start_positions, emission_weights, emission_row_max, @@ -65,10 +63,10 @@ class LinearChainCrfForward(object): # calculate the nominator part. 
log_likelihood += ( - self.a[label[0]] + self.x[0, label[0]] + self.b[label[-1]]) + self.a[label[0]] + x[0, label[0]] + self.b[label[-1]]) + for k in range(1, seq_len): - log_likelihood += ( - self.x[k, label[k]] + self.w[label[k - 1], label[k]]) + log_likelihood += (x[k, label[k]] + self.w[label[k - 1], label[k]]) return -log_likelihood def crf_forward_compute(self): @@ -77,7 +75,7 @@ class LinearChainCrfForward(object): end = self.seq_start_positions[i + 1] self.log_likelihood[i] = self._forward_a_sequence( - self.x[start:end], self.x_row_max[start:end, :], + self.x[start:end, :], self.x_row_max[start:end, :], self.x_exps[start:end, :], self.labels[start:end, :], self.alpha[start:end, :]) return self.alpha, self.log_likelihood @@ -85,10 +83,11 @@ class LinearChainCrfForward(object): class TestLinearChainCrfOp(OpTest): def set_test_data(self): - SEQ_NUM = 3 + SEQ_NUM = 2 TAG_NUM = 17 - MAX_SEQ_LEN = 13 + MAX_SEQ_LEN = 5 + random.seed(1) # the linear_chain_crf operator only supports sequence (LoD level = 1) lod = [[0]] for i in range(SEQ_NUM): From 3d8b6ebcf8700d9f459903c1aba322c909691656 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 24 Oct 2017 12:50:52 +0800 Subject: [PATCH 07/35] Add LSTM backward implementation. --- paddle/operators/lstm_op.cc | 56 ++++--- paddle/operators/lstm_op.h | 214 ++++++++++++++++++++++--- paddle/operators/math/sequence2batch.h | 12 +- 3 files changed, 237 insertions(+), 45 deletions(-) diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 0a089b7c2d..9cc89c7d99 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -21,7 +21,6 @@ class LSTMOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input(Input) of LSTM should not be null."); @@ -30,8 +29,8 @@ class LSTMOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput("Cell"), "Output(Cell) of LSTM should not be null."); - auto x_dims = ctx->GetInputDim("Input"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2."); + auto in_dims = ctx->GetInputDim("Input"); + PADDLE_ENFORCE_EQ(in_dims.size(), 2, "Input(X)'s rank must be 2."); if (ctx->HasInput("H0")) { PADDLE_ENFORCE(ctx->HasInput("C0"), "should be the same."); } - int frame_size = x_dims[1] / 4; + int frame_size = in_dims[1] / 4; auto w_dims = ctx->GetInputDim("Weight"); PADDLE_ENFORCE_EQ(w_dims.size(), 2, "The rank of Input(Weight) should be 2."); @@ -71,9 +70,11 @@ "4 * %d if disable peepholes connection", frame_size); } - ctx->SetOutputDim("Hidden", {x_dims[0], frame_size}); - ctx->SetOutputDim("Cell", {x_dims[0], frame_size}); - ctx->SetOutputDim("BatchGate", x_dims); + framework::DDim out_dims({in_dims[0], frame_size}); + ctx->SetOutputDim("Hidden", out_dims); + ctx->SetOutputDim("Cell", out_dims); + ctx->SetOutputDim("BatchGate", in_dims); + ctx->SetOutputDim("BatchCellPreAct", out_dims); ctx->ShareLoD("Input", "Hidden"); ctx->ShareLoD("Input", "Cell"); } @@ -86,7 +87,7 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Input", "(LoDTensor) the first input is a LodTensor, which support " "variable-time length input sequence. 
The underlying tensor in " - "this LoDTensor is a matrix with shape (T X 4D), where, T is the " + "this LoDTensor is a matrix with shape (T X 4D), where T is the " "total time steps in this mini-batch, D is the hidden size."); AddInput("H0", "(Tensor, optional) the initial hidden state is an optional " @@ -110,21 +111,25 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { "2. `usePeepholes = True` " " - The shape is (1 x 7D). " " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."); + AddOutput("Hidden", + "(LoDTensor) the hidden state lod tensor of LSTM operator. " + "The shape and lod is the same with the `Input`."); + AddOutput("Cell", + "(LoDTensor) the cell state lod tensor of LSTM operator. " + "The shape and lod is the same with the `Input`."); AddOutput("BatchGate", "(LoDTensor) This LoDTensor contains input gate, forget gate " "and output gate after the nonlinear computation. This " "LoDTensor has the same shape with the reorganized input, which " - "was also be called batch input. The LoD size is 2. The first " + "is also be called batch input. The LoD size is 2. The first " "LoD is the batch offsets and the second LoD contains the " "indexes, which denote the position of reorganized sequence " "in the raw input.") .AsIntermediate(); - AddOutput("Hidden", - "(LoDTensor) the hidden state lod tensor of LSTM operator. " - "The shape and lod is the same with the `Input`."); - AddOutput("Cell", - "(LoDTensor) the cell state lod tensor of LSTM operator. " - "The shape and lod is the same with the `Input`."); + AddOutput("BatchCellPreAct", + "(LoDTensor) This LoDTensor is get in the forward and used " + "in the backward.") + .AsIntermediate(); AddAttr("usePeepholes", "(bool, defalut: True) " "whether to enable diagonal/peephole connections.") @@ -202,15 +207,28 @@ class LSTMGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")), "Input(Hidden@GRAD) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cell")), "Input(Cell@GRAD) should not be null"); - ctx->SetOutputDim(framework::GradVarName("Weight"), - ctx->GetInputDim("Weight")); - ctx->SetOutputDim(framework::GradVarName("Bias"), ctx->GetInputDim("Bias")); + + ctx->SetOutputDim(framework::GradVarName("Input"), + ctx->GetInputDim("Input")); + if (ctx->HasInput("Weight")) { + ctx->SetOutputDim(framework::GradVarName("Weight"), + ctx->GetInputDim("Weight")); + } + if (ctx->HasInput("Bias")) { + ctx->SetOutputDim(framework::GradVarName("Bias"), + ctx->GetInputDim("Bias")); + } + if (ctx->HasInput("H0")) { + ctx->SetOutputDim(framework::GradVarName("H0"), ctx->GetInputDim("H0")); + } + if (ctx->HasInput("C0")) { + ctx->SetOutputDim(framework::GradVarName("C0"), ctx->GetInputDim("C0")); + } } }; diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index 0af5694c48..8945a22d7f 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -21,8 +21,9 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -using framework::LoDTensor; -using framework::Tensor; +using LoDTensor = framework::LoDTensor; +using Tensor = framework::Tensor; + template using EigenMatrix = framework::EigenMatrix; @@ -31,15 +32,15 @@ template class LSTMKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* input = ctx.Input("Input"); - auto* weight = ctx.Input("Weight"); - auto* bias = ctx.Input("Bias"); + auto* input = ctx.Input("Input"); + auto* weight = ctx.Input("Weight"); + auto* bias = ctx.Input("Bias"); - auto* batch_gate = ctx.Output("BatchGate"); + auto* batch_gate = ctx.Output("BatchGate"); batch_gate->mutable_data(ctx.GetPlace()); - auto* hidden_out = ctx.Output("Hidden"); + auto* hidden_out = ctx.Output("Hidden"); hidden_out->mutable_data(ctx.GetPlace()); - auto* cell_out = ctx.Output("Cell"); + auto* cell_out = ctx.Output("Cell"); cell_out->mutable_data(ctx.GetPlace()); // Now the function ShareLoD in InferShape is not implemented. @@ -49,7 +50,8 @@ class LSTMKernel : public framework::OpKernel { bool is_reverse = ctx.Attr("isReverse"); math::LoDTensor2BatchFunctor to_batch; - to_batch(ctx.device_context(), *input, *batch_gate, is_reverse); + auto& device_ctx = ctx.device_context(); + to_batch(device_ctx, *input, *batch_gate, true, is_reverse); auto in_dims = input->dims(); int frame_size = static_cast(in_dims[1] / 4); @@ -69,15 +71,23 @@ class LSTMKernel : public framework::OpKernel { } math::LstmMetaValue lstm_value; - T* bias_data = const_cast(bias->data()); - // the code style in LstmMetaValue will be updated later. - lstm_value.checkIg = bias_data + 4 * frame_size; - lstm_value.checkFg = lstm_value.checkIg + frame_size; - lstm_value.checkOg = lstm_value.checkFg + frame_size; + if (bias) { + T* bias_data = const_cast(bias->data()); + // the code style in LstmMetaValue will be updated later. + lstm_value.checkIg = bias_data + 4 * frame_size; + lstm_value.checkFg = lstm_value.checkIg + frame_size; + lstm_value.checkOg = lstm_value.checkFg + frame_size; + } else { + lstm_value.checkIg = nullptr; + lstm_value.checkFg = nullptr; + lstm_value.checkOg = nullptr; + } lstm_value.prevStateValue = nullptr; - framework::LoDTensor batch_out, batch_cell, batch_cell_pre_act; - batch_out.mutable_data(dims, ctx.GetPlace()); + // Use the local variable as here. 
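+ // batch_hidden and batch_cell hold the batch-reordered hidden and cell
+ // states; BatchCellPreAct is exposed as an intermediate output so that the
+ // backward kernel can reuse the pre-activation cell values.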
+ LoDTensor batch_hidden, batch_cell; + auto batch_cell_pre_act = *(ctx.Output("BatchCellPreAct")); + batch_hidden.mutable_data(dims, ctx.GetPlace()); batch_cell.mutable_data(dims, ctx.GetPlace()); batch_cell_pre_act.mutable_data(dims, ctx.GetPlace()); @@ -92,7 +102,7 @@ class LSTMKernel : public framework::OpKernel { int bend = static_cast(batch_starts[n + 1]); Tensor gate_t = batch_gate->Slice(bstart, bend); - Tensor out_t = batch_out.Slice(bstart, bend); + Tensor out_t = batch_hidden.Slice(bstart, bend); Tensor cell_t = batch_cell.Slice(bstart, bend); Tensor cell_pre_act_t = batch_cell_pre_act.Slice(bstart, bend); @@ -101,9 +111,9 @@ class LSTMKernel : public framework::OpKernel { if (n != 0) { int pre_h_start = static_cast(batch_starts[n - 1]); int pre_h_end = pre_h_start + cur_batch_size; - auto pre_hidden_t = batch_out.Slice(pre_h_start, pre_h_end); - math::matmul(ctx.device_context(), pre_hidden_t, false, - *weight, false, static_cast(1.0), &gate_t, + auto pre_hidden_t = batch_hidden.Slice(pre_h_start, pre_h_end); + math::matmul(device_ctx, pre_hidden_t, false, *weight, false, + static_cast(1.0), &gate_t, static_cast(1.0)); } // else if : FIXME support the initial hidden and cell @@ -112,27 +122,181 @@ class LSTMKernel : public framework::OpKernel { lstm_value.outputValue = out_t.data(); lstm_value.stateValue = cell_t.data(); lstm_value.stateActiveValue = cell_pre_act_t.data(); - math::LstmUnitFunctor::compute(ctx.device_context(), lstm_value, + math::LstmUnitFunctor::compute(device_ctx, lstm_value, frame_size, cur_batch_size, gate_act, cell_act, cand_act); lstm_value.prevStateValue = lstm_value.stateValue; } math::Batch2LoDTensorFunctor to_seq; - batch_out.set_lod(batch_gate->lod()); + batch_hidden.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden - to_seq(ctx.device_context(), batch_out, *hidden_out); + to_seq(device_ctx, batch_hidden, *hidden_out); batch_cell.set_lod(batch_gate->lod()); // restore the output cell state in LoDTensor from the batch cell - to_seq(ctx.device_context(), batch_cell, *cell_out); + to_seq(device_ctx, batch_cell, *cell_out); } }; template class LSTMGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override {} + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* weight = ctx.Input("Weight"); + auto* bias = ctx.Input("Bias"); + + auto* hidden_out = ctx.Input("Hidden"); + auto* cell_out = ctx.Input("Cell"); + + auto* batch_gate = ctx.Input("BatchGate"); + auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); + + auto* hidden_g = ctx.Input(framework::GradVarName("Hidden")); + auto* cell_g = ctx.Input(framework::GradVarName("Cell")); + + auto* in_g = ctx.Output(framework::GradVarName("Input")); + auto* weight_g = ctx.Output(framework::GradVarName("Weight")); + auto* bias_g = ctx.Output(framework::GradVarName("Bias")); + + auto& device_ctx = ctx.device_context(); + if (weight_g) { + math::SetConstant zero; + zero(device_ctx, weight_g, static_cast(0.0)); + } + + auto in_dims = input->dims(); + auto out_dims = hidden_g->dims(); + int frame_size = static_cast(in_dims[1] / 4); + PADDLE_ENFORCE_EQ(frame_size, out_dims[1]); + + math::LstmMetaValue lstm_value; + if (bias) { + T* bias_data = const_cast(bias->data()); + lstm_value.checkIg = bias_data + 4 * frame_size; + lstm_value.checkFg = lstm_value.checkIg + frame_size; + lstm_value.checkOg = lstm_value.checkFg + frame_size; + } else { + 
lstm_value.checkIg = nullptr; + lstm_value.checkFg = nullptr; + lstm_value.checkOg = nullptr; + } + + math::LstmMetaGrad lstm_grad; + if (bias && bias_g) { + T* bias_g_data = const_cast(bias_g->mutable_data(ctx.GetPlace())); + lstm_grad.checkIgGrad = bias_g_data + 4 * frame_size; + lstm_grad.checkFgGrad = lstm_grad.checkIgGrad + frame_size; + lstm_grad.checkOgGrad = lstm_grad.checkFgGrad + frame_size; + } else { + lstm_grad.checkIgGrad = nullptr; + lstm_grad.checkFgGrad = nullptr; + lstm_grad.checkOgGrad = nullptr; + } + + math::LoDTensor2BatchFunctor to_batch; + + // use the local variable as here. + LoDTensor batch_hidden; + batch_hidden.mutable_data(out_dims, ctx.GetPlace()); + batch_hidden.set_lod(batch_gate->lod()); + to_batch(device_ctx, *hidden_out, batch_hidden, false); + + LoDTensor batch_hidden_g; + batch_hidden_g.mutable_data(out_dims, ctx.GetPlace()); + batch_hidden_g.set_lod(batch_gate->lod()); + to_batch(device_ctx, *hidden_g, batch_hidden_g, false); + + LoDTensor batch_cell; + batch_cell.mutable_data(out_dims, ctx.GetPlace()); + batch_cell.set_lod(batch_gate->lod()); + to_batch(device_ctx, *cell_out, batch_cell, false); + + LoDTensor batch_cell_g; + batch_cell_g.mutable_data(out_dims, ctx.GetPlace()); + batch_cell_g.set_lod(batch_gate->lod()); + to_batch(device_ctx, *cell_g, batch_cell_g, false); + + LoDTensor batch_gate_g; + batch_gate_g.mutable_data(batch_gate->dims(), ctx.GetPlace()); + batch_gate_g.set_lod(batch_gate->lod()); + + auto gate_act = ctx.Attr("gateActivation"); + auto cell_act = ctx.Attr("cellActivation"); + auto cand_act = ctx.Attr("candidateActivation"); + + auto batch_starts = batch_gate->lod()[0]; + size_t num_batch = batch_starts.size() - 1; + for (int n = static_cast(num_batch); n >= 0; n--) { + int bstart = static_cast(batch_starts[n]); + int bend = static_cast(batch_starts[n + 1]); + + Tensor gate = batch_gate->Slice(bstart, bend); + Tensor cell = batch_cell.Slice(bstart, bend); + Tensor cell_pre_act = batch_cell_pre_act->Slice(bstart, bend); + lstm_value.gateValue = gate.data(); + lstm_value.stateValue = cell.data(); + lstm_value.stateActiveValue = cell_pre_act.data(); + + Tensor out_g = batch_hidden_g.Slice(bstart, bend); + Tensor gate_g = batch_gate_g.Slice(bstart, bend); + Tensor cell_g = batch_cell_g.Slice(bstart, bend); + lstm_grad.stateGrad = cell_g.data(); + lstm_grad.gateGrad = gate_g.data(); + lstm_grad.outputGrad = out_g.data(); + + if (n != 0) { + int bstart_pre = static_cast(batch_starts[n - 1]); + Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart); + Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart); + lstm_value.prevStateValue = cell_pre.data(); + lstm_grad.prevStateGrad = cell_pre_g.data(); + } else { + lstm_value.prevStateValue = nullptr; + lstm_grad.prevStateGrad = nullptr; + } + + int cur_batch_size = bend - bstart; + math::LstmUnitGradFunctor::compute( + device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size, + gate_act, cell_act, cand_act); + + if (n != 0) { + int pre_h_start = static_cast(batch_starts[n - 1]); + int pre_h_end = pre_h_start + cur_batch_size; + auto pre_hidden_g = batch_hidden_g.Slice(pre_h_start, pre_h_end); + math::matmul(device_ctx, gate_g, false, *weight, true, + static_cast(1.0), &pre_hidden_g, + static_cast(1.0)); + if (weight_g) { + /* backward weight */ + auto pre_hidden = batch_hidden.Slice(pre_h_start, pre_h_end); + math::matmul(device_ctx, pre_hidden, true, gate_g, false, + static_cast(1.0), weight_g, + static_cast(1.0)); + } + } + } + + math::Batch2LoDTensorFunctor to_seq; + if 
(in_g) { + /* backward data */ + to_seq(device_ctx, batch_gate_g, *in_g); + } + if (bias && bias_g) { + /* backward bias */ + bias_g->mutable_data(ctx.GetPlace()); + auto bias_g_e = EigenMatrix::From(*bias_g); + auto gate_g_e = EigenMatrix::From(batch_gate_g); + Eigen::array extents({{1, 4 * frame_size}}); + Eigen::array offsets({{0, 0}}); + auto bg = bias_g_e.slice(offsets, extents) + .reshape(Eigen::array({{1, frame_size * 4}})); + bg.device(ctx.GetEigenDevice()) = + gate_g_e.sum(Eigen::array({{0}})); + } + } }; } // namespace operators diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h index 03cd018e46..47a0f18496 100644 --- a/paddle/operators/math/sequence2batch.h +++ b/paddle/operators/math/sequence2batch.h @@ -53,7 +53,17 @@ class LoDTensor2BatchFunctor { public: void operator()(const platform::DeviceContext& context, const framework::LoDTensor& lod_tensor, - framework::LoDTensor& batch, bool is_reverse) const { + framework::LoDTensor& batch, bool is_cal_batch_lod, + bool is_reverse = false) const { + if (!is_cal_batch_lod) { + auto lods = batch.lod(); + PADDLE_ENFORCE_EQ(lods.size(), 2UL); + PADDLE_ENFORCE_EQ(lods[1].size(), lod_tensor.dims()[1]); + CopyMatrixRowsFunctor to_batch; + to_batch(context, lod_tensor, lods[1].data(), batch, true); + return; + } + auto lods = lod_tensor.lod(); PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now."); auto lod = lods[0]; From c74107bfdc690d20315a978feb8bb9527b4b3ea3 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 24 Oct 2017 19:52:42 +0800 Subject: [PATCH 08/35] fix backward computation. --- paddle/gserver/layers/CRFLayer.cpp | 6 +- paddle/gserver/layers/LinearChainCRF.cpp | 1 - paddle/operators/linear_chain_crf_op.cc | 77 ++++++++++--------- .../tests/test_linear_chain_crf_op.py | 14 ++-- 4 files changed, 54 insertions(+), 44 deletions(-) diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 0b54442009..867303b4fa 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -101,8 +101,10 @@ void CRFLayer::backward(const UpdateCallback& callback) { : real(1.0f); instanceWeight *= coeff_; - MatrixPtr grad = output.grad->subRowMatrix(starts[i], starts[i + 1]); - grad->add(*crfs_[i].getXGrad(), real(1.0f), instanceWeight); + if (output.grad) { + MatrixPtr grad = output.grad->subRowMatrix(starts[i], starts[i + 1]); + grad->add(*crfs_[i].getXGrad(), real(1.0f), instanceWeight); + } if (needWGrad) { weight_->getWGrad()->add( *crfs_[i].getWGrad(), real(1.0f), instanceWeight); diff --git a/paddle/gserver/layers/LinearChainCRF.cpp b/paddle/gserver/layers/LinearChainCRF.cpp index dc3dc15679..abaa1802b7 100644 --- a/paddle/gserver/layers/LinearChainCRF.cpp +++ b/paddle/gserver/layers/LinearChainCRF.cpp @@ -102,7 +102,6 @@ real LinearChainCRF::forward(real* x, int* s, int length) { } void LinearChainCRF::backward(real* x, int* s, int length, bool needWGrad) { - MatrixPtr matX = Matrix::create(x, length, numClasses_); Matrix::resizeOrCreate(matGrad_, length, numClasses_); Matrix::resizeOrCreate(beta_, length, numClasses_); real* b = b_->getData(); diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 62201dccb9..d13d4829d9 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -272,7 +272,7 @@ class LinearChainCrfOpKernel int end_pos = static_cast(in_lod[level][i + 1]); if (end_pos == start_pos) { // If an empty input sequence is 
given, pad 0 for its cost. - log_likelihood[i] = static_cast(0.); + log_likelihood[i] = 0.; continue; } @@ -305,7 +305,7 @@ class LinearChainCrfOpKernel const size_t tag_num = x_dims[1]; // The 1st row of w are transition weights for start mask. // The 2nd row of w are transition weights for end mask. - // Transition weights among other tags begins from the 3rd row of w. + // Transition weights among other tags begin from the 3rd row of w. const size_t state_trans_base_idx = 2; for (size_t i = 0; i < tag_num; ++i) { @@ -315,7 +315,7 @@ class LinearChainCrfOpKernel for (size_t k = 1; k < seq_length; ++k) { for (size_t i = 0; i < tag_num; ++i) { - T sum = static_cast(0.); + T sum = 0.; for (size_t j = 0; j < tag_num; ++j) { sum += alpha_value[(k - 1) * tag_num + j] * w_exps[(j + state_trans_base_idx) * tag_num + i]; @@ -476,17 +476,17 @@ class LinearChainCrfGradOpKernel const size_t tag_num = x_dims[1]; const size_t state_trans_base_idx = 2; - // Calculate the backwark vectors beta. + // Calculate the backward vectors: beta. // First, calculate the initialition state. - for (int i = 0; i < tag_num; ++i) { + for (size_t i = 0; i < tag_num; ++i) { beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i]; } NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num); - for (int k = seq_length - 2; k >= 0; --k) { - for (int i = 0; i < tag_num; ++i) { - T sum = static_cast(0.); - for (int j = 0; j < tag_num; ++j) { + for (int k = static_cast(seq_length) - 2; k >= 0; --k) { + for (size_t i = 0; i < tag_num; ++i) { + T sum = 0.; + for (size_t j = 0; j < tag_num; ++j) { sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * x_exps[(k + 1) * tag_num + j] * beta_value[(k + 1) * tag_num + j]; @@ -500,13 +500,14 @@ class LinearChainCrfGradOpKernel auto beta_mat = EigenMatrix::From(*beta); auto x_grad_mat = EigenMatrix::From(*emission_grad); auto* place = ctx.GetEigenDevice(); - x_grad_mat.device(*place) = alpha_mat * beta_mat; - x_grad_mat /= x_grad_mat.sum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(seq_length, 1)) - .broadcast(Eigen::DSizes(1, tag_num)); - - for (int k = 0; k < seq_length; ++k) { - x_grad_mat(k, label_value[k]) -= static_cast(1); + auto prob = alpha_mat * beta_mat; + auto row_sum = prob.sum(Eigen::DSizes(1)) + .reshape(Eigen::DSizes(seq_length, 1)) + .broadcast(Eigen::DSizes(1, tag_num)); + x_grad_mat.device(*place) = prob / row_sum; + + for (size_t k = 0; k < seq_length; ++k) { + x_grad_mat(k, label_value[k]) -= static_cast(1.); } if (transition_grad) { @@ -518,29 +519,35 @@ class LinearChainCrfGradOpKernel } auto x_exps_mat = EigenMatrix::From(*emission_exps); - beta_mat = beta_mat * x_exps_mat; - beta_mat /= beta_mat.sum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(seq_length, 1)) - .broadcast(Eigen::DSizes(1, tag_num)); - - for (int k = 1; k < seq_length; ++k) { - T sum = static_cast(0.); - for (int i = 0; i < tag_num; ++i) { - for (int j = 0; j < tag_num; ++j) { + + // TODO(caoying): Fix this to avoid using this local variable. 
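+ // tmp holds beta multiplied element-wise by the emission exps, with each
+ // row L1-normalized; combined with alpha below, it yields the pairwise
+ // marginals that drive the transition-weight gradient.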
+ Tensor tmp; + tmp.mutable_data(beta->dims(), platform::CPUPlace()); + auto tmp_mat = EigenMatrix::From(tmp); + auto prob = beta_mat * x_exps_mat; + auto row_sum = prob.sum(Eigen::DSizes(1)) + .reshape(Eigen::DSizes(seq_length, 1)) + .broadcast(Eigen::DSizes(1, tag_num)); + tmp_mat.device(*place) = prob / row_sum; + + for (size_t k = 1; k < seq_length; ++k) { + T sum = 0.; + for (size_t i = 0; i < tag_num; ++i) { + for (size_t j = 0; j < tag_num; ++j) { sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * - alpha_mat(k - 1, i) * beta_mat(k, j); + alpha_mat(k - 1, i) * tmp_mat(k, j); } } - sum = static_cast(1.) / sum; - for (int i = 0; i < tag_num; ++i) { - for (int j = 0; j < tag_num; ++j) { + sum = 1. / sum; + for (size_t i = 0; i < tag_num; ++i) { + for (size_t j = 0; j < tag_num; ++j) { trans_grad[(i + state_trans_base_idx) * tag_num + j] += sum * w_exps[(i + state_trans_base_idx) * tag_num + j] * - alpha_mat(k - 1, i) * beta_mat(k, j); + alpha_mat(k - 1, i) * tmp_mat(k, j); } } - trans_grad[label_value[k - 1] * tag_num + label_value[k]] -= - static_cast(1.); + trans_grad[(label_value[k - 1] + state_trans_base_idx) * tag_num + + label_value[k]] -= static_cast(1.); } } } @@ -554,9 +561,7 @@ REGISTER_OP(linear_chain_crf, ops::LinearChainCrfOp, ops::LinearChainCrfOpMaker, linear_chain_crf_grad, ops::LinearChainCrfGradOp); REGISTER_OP_CPU_KERNEL( linear_chain_crf, - ops::LinearChainCrfOpKernel, - ops::LinearChainCrfOpKernel); + ops::LinearChainCrfOpKernel); REGISTER_OP_CPU_KERNEL( linear_chain_crf_grad, - ops::LinearChainCrfGradOpKernel, - ops::LinearChainCrfGradOpKernel); + ops::LinearChainCrfGradOpKernel); diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py index 0f169ada95..4d0cac2ad3 100644 --- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py +++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py @@ -83,11 +83,10 @@ class LinearChainCrfForward(object): class TestLinearChainCrfOp(OpTest): def set_test_data(self): - SEQ_NUM = 2 + SEQ_NUM = 3 TAG_NUM = 17 MAX_SEQ_LEN = 5 - random.seed(1) # the linear_chain_crf operator only supports sequence (LoD level = 1) lod = [[0]] for i in range(SEQ_NUM): @@ -109,7 +108,6 @@ class TestLinearChainCrfOp(OpTest): "Transition": transition, "Label": (labels, lod) } - crf = LinearChainCrfForward(lod[0], emission, emission_row_max, emission_exps, transition, transition_exps, labels) @@ -130,11 +128,17 @@ class TestLinearChainCrfOp(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["Emission", "Transition"], "LogLikelihood") + self.check_grad( + ["Emission", "Transition"], + "LogLikelihood", + max_relative_error=0.05) def test_check_grad_ignore_transition(self): self.check_grad( - ["Emission"], "LogLikelihood", no_grad_set=set("Transition")) + ["Emission"], + "LogLikelihood", + max_relative_error=0.05, + no_grad_set=set("Transition")) if __name__ == "__main__": From cd382866848ecbdc2b95e363c8fe73e1aa82e882 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 26 Oct 2017 11:37:29 +0800 Subject: [PATCH 09/35] Add gradient check unit testing and fix bug. 
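One detail worth noting: because the bias is broadcast over every time step, its gradient is the column-wise sum of the batch gate gradients. The kernel now computes this as a gemv against a vector of ones, since the earlier Eigen reduction failed for double on GPU. A small NumPy sketch of the identity, with illustrative shapes:

    import numpy as np

    m, n = 9, 16                     # total time steps in the batch, 4 * frame_size
    batch_gate_g = np.random.randn(m, n)

    ones = np.ones(m)
    bias_g = ones.dot(batch_gate_g)  # gemv with trans_a = true: ones^T * G

    assert np.allclose(bias_g, batch_gate_g.sum(axis=0))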
--- paddle/operators/lstm_op.cc | 57 +++++++------ paddle/operators/lstm_op.h | 41 +++++++--- paddle/operators/math/math_function.cc | 20 +++++ paddle/operators/math/math_function.cu | 27 ++++++ paddle/operators/math/math_function.h | 5 ++ paddle/operators/math/sequence2batch.h | 9 +- .../paddle/v2/framework/tests/test_lstm_op.py | 82 +++++++++++-------- 7 files changed, 163 insertions(+), 78 deletions(-) diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 9cc89c7d99..73ab9b18dc 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -28,6 +28,10 @@ class LSTMOp : public framework::OperatorWithKernel { "Output(Hidden) of LSTM should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Cell"), "Output(Cell) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("BatchGate"), + "Output(BatchGate) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("BatchCellPreAct"), + "Output(BatchGate) of LSTM should not be null."); auto in_dims = ctx->GetInputDim("Input"); PADDLE_ENFORCE_EQ(in_dims.size(), 2, "Input(X)'s rank must be 2."); @@ -92,11 +96,13 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("H0", "(Tensor, optional) the initial hidden state is an optional " "input. This is a tensor with shape (N x D), where N is the " - "batch size, D is the hidden size."); + "batch size, D is the hidden size.") + .AsDispensable(); AddInput("C0", "(Tensor, optional) the initial cell state is an optional " "input. This is a tensor with shape (N x D), where N is the " - "batch size. `H0` and `C0` can be NULL but only at the same time"); + "batch size. `H0` and `C0` can be NULL but only at the same time") + .AsDispensable(); AddInput("Weight", "(Tensor) the learnable hidden-hidden weights." " - The shape is (D x 4D), where D is the hidden size. " @@ -110,7 +116,8 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { " - Bias = {b_c, b_i, b_f, b_o}." "2. `usePeepholes = True` " " - The shape is (1 x 7D). " - " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}."); + " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.") + .AsDispensable(); AddOutput("Hidden", "(LoDTensor) the hidden state lod tensor of LSTM operator. 
" "The shape and lod is the same with the `Input`."); @@ -208,27 +215,29 @@ class LSTMGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")), - "Input(Hidden@GRAD) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cell")), - "Input(Cell@GRAD) should not be null"); - - ctx->SetOutputDim(framework::GradVarName("Input"), - ctx->GetInputDim("Input")); - if (ctx->HasInput("Weight")) { - ctx->SetOutputDim(framework::GradVarName("Weight"), - ctx->GetInputDim("Weight")); - } - if (ctx->HasInput("Bias")) { - ctx->SetOutputDim(framework::GradVarName("Bias"), - ctx->GetInputDim("Bias")); - } - if (ctx->HasInput("H0")) { - ctx->SetOutputDim(framework::GradVarName("H0"), ctx->GetInputDim("H0")); - } - if (ctx->HasInput("C0")) { - ctx->SetOutputDim(framework::GradVarName("C0"), ctx->GetInputDim("C0")); - } + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Hidden"), + "Input(Hidden) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Cell"), + "Input(Cell) of LSTM should not be null."); + + PADDLE_ENFORCE(ctx->HasInput("BatchGate"), + "Input(BatchGate) of LSTM should not be null."); + PADDLE_ENFORCE(ctx->HasInput("BatchCellPreAct"), + "Input(BatchGate) of LSTM should not be null."); + + auto in_g_name = framework::GradVarName("Input"); + if (ctx->HasOutput(in_g_name)) + ctx->SetOutputDim(in_g_name, ctx->GetInputDim("Input")); + + auto w_g_name = framework::GradVarName("Weight"); + if (ctx->HasOutput(w_g_name)) + ctx->SetOutputDim(w_g_name, ctx->GetInputDim("Weight")); + + auto b_g_name = framework::GradVarName("Bias"); + if (ctx->HasOutput(b_g_name)) + ctx->SetOutputDim(b_g_name, ctx->GetInputDim("Bias")); } }; diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index 8945a22d7f..fbdb28bf60 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -74,6 +74,7 @@ class LSTMKernel : public framework::OpKernel { if (bias) { T* bias_data = const_cast(bias->data()); // the code style in LstmMetaValue will be updated later. + lstm_value.checkIg = bias_data + 4 * frame_size; lstm_value.checkFg = lstm_value.checkIg + frame_size; lstm_value.checkOg = lstm_value.checkFg + frame_size; @@ -86,10 +87,10 @@ class LSTMKernel : public framework::OpKernel { // Use the local variable as here. 
LoDTensor batch_hidden, batch_cell; - auto batch_cell_pre_act = *(ctx.Output("BatchCellPreAct")); + auto* batch_cell_pre_act = ctx.Output("BatchCellPreAct"); batch_hidden.mutable_data(dims, ctx.GetPlace()); batch_cell.mutable_data(dims, ctx.GetPlace()); - batch_cell_pre_act.mutable_data(dims, ctx.GetPlace()); + batch_cell_pre_act->mutable_data(dims, ctx.GetPlace()); auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; @@ -104,7 +105,7 @@ class LSTMKernel : public framework::OpKernel { Tensor gate_t = batch_gate->Slice(bstart, bend); Tensor out_t = batch_hidden.Slice(bstart, bend); Tensor cell_t = batch_cell.Slice(bstart, bend); - Tensor cell_pre_act_t = batch_cell_pre_act.Slice(bstart, bend); + Tensor cell_pre_act_t = batch_cell_pre_act->Slice(bstart, bend); int cur_batch_size = bend - bstart; @@ -162,6 +163,7 @@ class LSTMGradKernel : public framework::OpKernel { auto& device_ctx = ctx.device_context(); if (weight_g) { + weight_g->mutable_data(ctx.GetPlace()); math::SetConstant zero; zero(device_ctx, weight_g, static_cast(0.0)); } @@ -228,7 +230,7 @@ class LSTMGradKernel : public framework::OpKernel { auto batch_starts = batch_gate->lod()[0]; size_t num_batch = batch_starts.size() - 1; - for (int n = static_cast(num_batch); n >= 0; n--) { + for (int n = static_cast(num_batch) - 1; n >= 0; n--) { int bstart = static_cast(batch_starts[n]); int bend = static_cast(batch_starts[n + 1]); @@ -282,19 +284,32 @@ class LSTMGradKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; if (in_g) { /* backward data */ + in_g->mutable_data(ctx.GetPlace()); to_seq(device_ctx, batch_gate_g, *in_g); } if (bias && bias_g) { /* backward bias */ - bias_g->mutable_data(ctx.GetPlace()); - auto bias_g_e = EigenMatrix::From(*bias_g); - auto gate_g_e = EigenMatrix::From(batch_gate_g); - Eigen::array extents({{1, 4 * frame_size}}); - Eigen::array offsets({{0, 0}}); - auto bg = bias_g_e.slice(offsets, extents) - .reshape(Eigen::array({{1, frame_size * 4}})); - bg.device(ctx.GetEigenDevice()) = - gate_g_e.sum(Eigen::array({{0}})); + // Following Eigen computation failed for double type on GPU device. + // bias_g->mutable_data(ctx.GetPlace()); + // Tensor bias_mat; + // bias_mat.ShareDataWith(*bias_g); + // bias_mat.Resize({1, 4 * frame_size}); + + // auto bias_g_e = EigenVector::Flatten(bias_mat); + // auto gate_g_e = EigenMatrix::From(batch_gate_g); + // Eigen::array dims{{0}}; + // bias_g_e.device(ctx.GetEigenDevice()) = gate_g_e.sum(dims); + + int m = static_cast(batch_gate_g.dims()[0]); + int n = static_cast(batch_gate_g.dims()[1]); + + Tensor ones; + ones.mutable_data({1, m}, ctx.GetPlace()); + math::SetConstant set; + set(device_ctx, &ones, static_cast(1.0)); + + math::gemv(device_ctx, true, m, n, 1., batch_gate_g.data(), + ones.data(), 0., bias_g->data()); } } }; diff --git a/paddle/operators/math/math_function.cc b/paddle/operators/math/math_function.cc index aad1357598..2a9c09a0f1 100644 --- a/paddle/operators/math/math_function.cc +++ b/paddle/operators/math/math_function.cc @@ -211,6 +211,26 @@ void batched_gemm( } #endif +template <> +void gemv(const platform::DeviceContext& context, + const bool trans_a, const int M, + const int N, const float alpha, + const float* A, const float* B, + const float beta, float* C) { + CBLAS_TRANSPOSE transA = (trans_a == false) ? 
CblasNoTrans : CblasTrans; + cblas_sgemv(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1); +} + +template <> +void gemv(const platform::DeviceContext& context, + const bool trans_a, const int M, + const int N, const double alpha, + const double* A, const double* B, + const double beta, double* C) { + CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; + cblas_dgemv(CblasRowMajor, transA, M, N, alpha, A, N, B, 1, beta, C, 1); +} + template struct SetConstant; } // namespace math diff --git a/paddle/operators/math/math_function.cu b/paddle/operators/math/math_function.cu index 5583683c6e..e6fd8bf235 100644 --- a/paddle/operators/math/math_function.cu +++ b/paddle/operators/math/math_function.cu @@ -203,6 +203,33 @@ void batched_gemm( &beta, C, ldc, strideC, batchCount)); } +template <> +void gemv(const platform::DeviceContext& context, + const bool trans_a, const int M, + const int N, const float alpha, + const float* A, const float* B, + const float beta, float* C) { + cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N; + + PADDLE_ENFORCE(platform::dynload::cublasSgemv( + reinterpret_cast(context) + .cublas_handle(), + cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1)); +} + +template <> +void gemv(const platform::DeviceContext& context, + const bool trans_a, const int M, + const int N, const double alpha, + const double* A, const double* B, + const double beta, double* C) { + cublasOperation_t cuTransA = (trans_a == false) ? CUBLAS_OP_T : CUBLAS_OP_N; + PADDLE_ENFORCE(platform::dynload::cublasDgemv( + reinterpret_cast(context) + .cublas_handle(), + cuTransA, N, M, &alpha, A, N, B, 1, &beta, C, 1)); +} + template struct SetConstant; } // namespace math diff --git a/paddle/operators/math/math_function.h b/paddle/operators/math/math_function.h index 9777ebfd15..3bb5aa0332 100644 --- a/paddle/operators/math/math_function.h +++ b/paddle/operators/math/math_function.h @@ -93,6 +93,11 @@ void batched_gemm(const platform::DeviceContext& context, const T* A, const T* B, const T beta, T* C, const int batchCount, const int strideA, const int strideB); +template +void gemv(const platform::DeviceContext& context, const bool trans_a, + const int M, const int N, const T alpha, const T* A, const T* B, + const T beta, T* C); + template struct SetConstant { void operator()(const platform::DeviceContext& context, diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h index 47a0f18496..b833a326c8 100644 --- a/paddle/operators/math/sequence2batch.h +++ b/paddle/operators/math/sequence2batch.h @@ -58,7 +58,7 @@ class LoDTensor2BatchFunctor { if (!is_cal_batch_lod) { auto lods = batch.lod(); PADDLE_ENFORCE_EQ(lods.size(), 2UL); - PADDLE_ENFORCE_EQ(lods[1].size(), lod_tensor.dims()[1]); + PADDLE_ENFORCE_EQ(lods[1].size(), lod_tensor.dims()[0]); CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, lods[1].data(), batch, true); return; @@ -142,11 +142,8 @@ class Batch2LoDTensorFunctor { auto in_lod = batch.lod(); PADDLE_ENFORCE_EQ(in_lod.size(), 2UL, "The LoD size of input `batch` should be 2."); - auto out_lod = lod_tensor.lod()[0]; - auto num = out_lod[out_lod.size() - 1]; - PADDLE_ENFORCE_EQ(num, lod_tensor.dims()[0]); - PADDLE_ENFORCE_EQ(num, in_lod[1].size()); - PADDLE_ENFORCE_EQ(num, batch.dims()[0]); + PADDLE_ENFORCE_EQ(in_lod[1].size(), + static_cast(lod_tensor.dims()[0])); CopyMatrixRowsFunctor to_seq; size_t* index = in_lod[1].data(); to_seq(context, batch, index, lod_tensor, false); diff --git 
a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py index 93a4e450e9..2cc0c5d7d9 100644 --- a/python/paddle/v2/framework/tests/test_lstm_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_op.py @@ -100,9 +100,9 @@ def lstm( cell.append(c_pre.flatten()) gate.append(g_pre.flatten()) - hidden = np.array(hidden).astype("float64") - cell = np.array(cell).astype("float64") - gate = np.array(gate).astype("float64") + hidden = np.array(hidden).astype('float64') + cell = np.array(cell).astype('float64') + gate = np.array(gate).astype('float64') hidden = _reverse(hidden, offset) if is_reverse else hidden cell = _reverse(cell, offset) if is_reverse else cell @@ -115,28 +115,35 @@ def lstm( class TestLstmOp(OpTest): def set_data(self): - self.lod = [[0, 2, 6, 9]] - self.D = 64 - self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + # self.lod = [[0, 2, 6, 9]] + # self.D = 64 + # self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] - self.act_gate = "sigmoid" - self.act_cell = "tanh" - self.act_cand = "tanh" + self.lod = [[0, 1]] + self.D = 4 + self.sort_idx = [0] + + # self.act_gate = 'identity' + # self.act_cell = 'identity' + # self.act_cand = 'identity' + self.act_gate = 'sigmoid' + self.act_cell = 'tanh' + self.act_cand = 'tanh' self.is_reverse = False def setUp(self): self.set_data() - self.op_type = "lstm" + self.op_type = 'lstm' T = self.lod[0][-1] N = len(self.lod[0]) - 1 - x = np.random.normal(size=(T, 4 * self.D)).astype("float64") - h0 = np.zeros((N, self.D)).astype("float64") - c0 = np.zeros((N, self.D)).astype("float64") - w = np.random.normal(size=(self.D, 4 * self.D)).astype("float64") - b = np.random.normal(size=(1, 7 * self.D)).astype("float64") + x = np.random.normal(size=(T, 4 * self.D)).astype('float64') + h0 = np.zeros((N, self.D)).astype('float64') + c0 = np.zeros((N, self.D)).astype('float64') + w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64') + b = np.random.normal(size=(1, 7 * self.D)).astype('float64') w_b = b[:, 0:4 * self.D] w_c = b[:, 4 * self.D:] @@ -158,32 +165,37 @@ class TestLstmOp(OpTest): self.outputs = { 'Hidden': (h, self.lod), 'Cell': (c, self.lod), - 'BatchGate': g_sort + #'BatchGate': g_sort, } self.attrs = { 'usePeepholes': True, 'isReverse': self.is_reverse, - 'gateActivation': 'sigmoid', - 'cellActivation': 'tanh', - 'candidateActivation': 'tanh' + 'gateActivation': self.act_gate, + 'cellActivation': self.act_cell, + 'candidateActivation': self.act_cand } - def test_check_output(self): + def not_test_check_output(self): self.check_output() - -class TestLstmOpRerverse(TestLstmOp): - def set_data(self): - self.lod = [[0, 2, 6, 9]] - self.D = 64 - self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] - - self.act_gate = "sigmoid" - self.act_cell = "tanh" - self.act_cand = "tanh" - - self.is_reverse = True - - -if __name__ == "__main__": + def test_check_grad(self): + self.outputs['BatchGate'] = None + self.outputs['BatchCellPreAct'] = None + self.check_grad(['Input', 'Weight'], ['Hidden', 'Cell']) + #['Input', 'Weight', 'Bias'], ['Hidden', 'Cell']) + + #class TestLstmOpRerverse(TestLstmOp): + # def set_data(self): + # self.lod = [[0, 2, 6, 9]] + # self.D = 64 + # self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + # + # self.act_gate = 'sigmoid' + # self.act_cell = 'tanh' + # self.act_cand = 'tanh' + # + # self.is_reverse = True + + +if __name__ == '__main__': unittest.main() From ac3370a4671a9d68111c068cb602f9ca2fac8b1f Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 26 Oct 2017 18:00:40 +0800 Subject: [PATCH 10/35] 
Add unit testing for gemv and fix the gradient check for bias. --- paddle/framework/lod_tensor_test.cu | 8 +- paddle/operators/lstm_op.h | 7 +- paddle/operators/math/math_function_test.cc | 50 ++++++++++++ paddle/operators/math/math_function_test.cu | 62 ++++++++++++++ .../paddle/v2/framework/tests/test_lstm_op.py | 80 ++++++++++--------- 5 files changed, 165 insertions(+), 42 deletions(-) diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index c79c4d0c72..5b90fbfca7 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -36,8 +36,8 @@ TEST(LoDTensor, LoDInGPU) { lod_tensor.mutable_data(place); lod_tensor.set_lod(src_lod); - CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL); - CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL); + EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL); + EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL); auto lod = lod_tensor.lod(); @@ -45,6 +45,6 @@ TEST(LoDTensor, LoDInGPU) { cudaDeviceSynchronize(); for (size_t i = 0; i < src_lod[0].size(); ++i) { - CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); + EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); } -} \ No newline at end of file +} diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index fbdb28bf60..f910e3bc34 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -162,9 +162,9 @@ class LSTMGradKernel : public framework::OpKernel { auto* bias_g = ctx.Output(framework::GradVarName("Bias")); auto& device_ctx = ctx.device_context(); + math::SetConstant zero; if (weight_g) { weight_g->mutable_data(ctx.GetPlace()); - math::SetConstant zero; zero(device_ctx, weight_g, static_cast(0.0)); } @@ -188,6 +188,7 @@ class LSTMGradKernel : public framework::OpKernel { math::LstmMetaGrad lstm_grad; if (bias && bias_g) { T* bias_g_data = const_cast(bias_g->mutable_data(ctx.GetPlace())); + zero(device_ctx, bias_g, static_cast(0.0)); lstm_grad.checkIgGrad = bias_g_data + 4 * frame_size; lstm_grad.checkFgGrad = lstm_grad.checkIgGrad + frame_size; lstm_grad.checkOgGrad = lstm_grad.checkFgGrad + frame_size; @@ -219,6 +220,8 @@ class LSTMGradKernel : public framework::OpKernel { batch_cell_g.mutable_data(out_dims, ctx.GetPlace()); batch_cell_g.set_lod(batch_gate->lod()); to_batch(device_ctx, *cell_g, batch_cell_g, false); + // TODO(qingqing) support the case output cell has gradient. + zero(device_ctx, &batch_cell_g, static_cast(0.0)); LoDTensor batch_gate_g; batch_gate_g.mutable_data(batch_gate->dims(), ctx.GetPlace()); @@ -304,7 +307,7 @@ class LSTMGradKernel : public framework::OpKernel { int n = static_cast(batch_gate_g.dims()[1]); Tensor ones; - ones.mutable_data({1, m}, ctx.GetPlace()); + ones.mutable_data({m}, ctx.GetPlace()); math::SetConstant set; set(device_ctx, &ones, static_cast(1.0)); diff --git a/paddle/operators/math/math_function_test.cc b/paddle/operators/math/math_function_test.cc index 3b9f92e7ae..7d84ad9aad 100644 --- a/paddle/operators/math/math_function_test.cc +++ b/paddle/operators/math/math_function_test.cc @@ -89,3 +89,53 @@ TEST(math_function, zero) { EXPECT_EQ(t[2], 1); EXPECT_EQ(t[3], 1); } + +template +void GemvTest(int m, int n, bool trans) { + paddle::framework::Tensor mat_a; + paddle::framework::Tensor vec_b; + paddle::framework::Tensor vec_c; + auto* cpu_place = new paddle::platform::CPUPlace(); + int b_num = trans ? m : n; + int c_num = trans ? 
n : m; + + T* data_a = mat_a.mutable_data({m, n}, *cpu_place); + T* data_b = vec_b.mutable_data({b_num}, *cpu_place); + T* data_c = vec_c.mutable_data({c_num}, *cpu_place); + for (int i = 0; i < mat_a.numel(); ++i) { + data_a[i] = static_cast(i); + } + for (int i = 0; i < vec_b.numel(); ++i) { + data_b[i] = static_cast(i); + } + + paddle::platform::CPUDeviceContext context(*cpu_place); + paddle::operators::math::gemv( + context, trans, static_cast(m), static_cast(n), 1., data_a, + data_b, 0., data_c); + + if (!trans) { + for (int i = 0; i < m; ++i) { + T sum = 0.0; + for (int j = 0; j < n; ++j) { + sum += data_a[i * n + j] * data_b[j]; + } + ASSERT_FLOAT_EQ(data_c[i], sum); + } + } else { + for (int i = 0; i < n; ++i) { + T sum = 0.0; + for (int j = 0; j < m; ++j) { + sum += data_a[j * n + i] * data_b[j]; + } + ASSERT_FLOAT_EQ(data_c[i], sum); + } + } +} + +TEST(math_function, gemv) { + GemvTest(3, 13, false); + GemvTest(4, 5, false); + GemvTest(12, 7, true); + GemvTest(7, 9, true); +} diff --git a/paddle/operators/math/math_function_test.cu b/paddle/operators/math/math_function_test.cu index 8b22c71552..780d17ffc6 100644 --- a/paddle/operators/math/math_function_test.cu +++ b/paddle/operators/math/math_function_test.cu @@ -177,3 +177,65 @@ TEST(math_function, gemm_trans_cublas) { EXPECT_EQ(input3_ptr[7], 99); delete gpu_place; } + +template +void GemvTest(int m, int n, bool trans) { + paddle::framework::Tensor mat_a; + paddle::framework::Tensor vec_b; + paddle::framework::Tensor vec_c; + auto* cpu_place = new paddle::platform::CPUPlace(); + + T* data_a = mat_a.mutable_data({m, n}, *cpu_place); + T* data_b = vec_b.mutable_data({trans ? m : n}, *cpu_place); + T* data_c = vec_c.mutable_data({trans ? n : m}, *cpu_place); + + auto* gpu_place = new paddle::platform::GPUPlace(0); + paddle::framework::Tensor g_mat_a; + paddle::framework::Tensor g_vec_b; + paddle::framework::Tensor g_vec_c; + T* g_data_a = g_mat_a.mutable_data(mat_a.dims(), *gpu_place); + T* g_data_b = g_vec_b.mutable_data(vec_b.dims(), *gpu_place); + T* g_data_c = g_vec_c.mutable_data(vec_c.dims(), *gpu_place); + + for (int i = 0; i < mat_a.numel(); ++i) { + data_a[i] = static_cast(i); + } + for (int i = 0; i < vec_b.numel(); ++i) { + data_b[i] = static_cast(i); + } + + paddle::platform::CUDADeviceContext context(*gpu_place); + g_mat_a.CopyFrom(mat_a, *gpu_place, context); + g_vec_b.CopyFrom(vec_b, *gpu_place, context); + + paddle::operators::math::gemv( + context, trans, static_cast(m), static_cast(n), 1., g_data_a, + g_data_b, 0., g_data_c); + + vec_c.CopyFrom(g_vec_c, paddle::platform::CPUPlace(), context); + + if (!trans) { + for (int i = 0; i < m; ++i) { + T sum = 0.0; + for (int j = 0; j < n; ++j) { + sum += data_a[i * n + j] * data_b[j]; + } + ASSERT_FLOAT_EQ(data_c[i], sum); + } + } else { + for (int i = 0; i < n; ++i) { + T sum = 0.0; + for (int j = 0; j < m; ++j) { + sum += data_a[j * n + i] * data_b[j]; + } + ASSERT_FLOAT_EQ(data_c[i], sum); + } + } +} + +TEST(math_function, gemv) { + GemvTest(3, 13, false); + GemvTest(3, 13, false); + GemvTest(3, 13, true); + GemvTest(3, 13, true); +} diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py index 2cc0c5d7d9..e10972bb3a 100644 --- a/python/paddle/v2/framework/tests/test_lstm_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_op.py @@ -114,26 +114,20 @@ def lstm( class TestLstmOp(OpTest): - def set_data(self): - # self.lod = [[0, 2, 6, 9]] - # self.D = 64 - # self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] - - 
self.lod = [[0, 1]] - self.D = 4 - self.sort_idx = [0] - - # self.act_gate = 'identity' - # self.act_cell = 'identity' - # self.act_cand = 'identity' + def set_argument(self): + self.lod = [[0, 2, 6, 9]] + self.D = 16 + self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + self.act_gate = 'sigmoid' self.act_cell = 'tanh' self.act_cand = 'tanh' + self.has_initial_state = True self.is_reverse = False def setUp(self): - self.set_data() + self.set_argument() self.op_type = 'lstm' T = self.lod[0][-1] @@ -155,17 +149,14 @@ class TestLstmOp(OpTest): for i, j in enumerate(self.sort_idx): g_sort[i, :] = g[j, :] - self.inputs = { - 'Input': (x, self.lod), - 'H0': h0, - 'C0': c0, - 'Weight': w, - 'Bias': b - } + self.inputs = {'Input': (x, self.lod), 'Weight': w, 'Bias': b} + self.inputs['H0'] = h0 + self.inputs['C0'] = c0 + self.outputs = { 'Hidden': (h, self.lod), 'Cell': (c, self.lod), - #'BatchGate': g_sort, + 'BatchGate': g_sort, } self.attrs = { 'usePeepholes': True, @@ -175,26 +166,43 @@ class TestLstmOp(OpTest): 'candidateActivation': self.act_cand } - def not_test_check_output(self): + def test_check_output(self): self.check_output() + #TODO(qingqing) add more unit testing case def test_check_grad(self): + # TODO(qingqing) remove folowing two lines after the check_grad is refined. self.outputs['BatchGate'] = None self.outputs['BatchCellPreAct'] = None - self.check_grad(['Input', 'Weight'], ['Hidden', 'Cell']) - #['Input', 'Weight', 'Bias'], ['Hidden', 'Cell']) - - #class TestLstmOpRerverse(TestLstmOp): - # def set_data(self): - # self.lod = [[0, 2, 6, 9]] - # self.D = 64 - # self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] - # - # self.act_gate = 'sigmoid' - # self.act_cell = 'tanh' - # self.act_cand = 'tanh' - # - # self.is_reverse = True + self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden']) + + +class TestLstmOpHasNoInitial(TestLstmOp): + def set_argument(self): + self.lod = [[0, 2, 6, 9]] + self.D = 64 + self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + + self.act_gate = 'sigmoid' + self.act_cell = 'tanh' + self.act_cand = 'tanh' + + self.has_initial_state = False + self.is_reverse = True + + +class TestLstmOpRerverse(TestLstmOp): + def set_argument(self): + self.lod = [[0, 2, 6, 9]] + self.D = 64 + self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + + self.act_gate = 'sigmoid' + self.act_cell = 'tanh' + self.act_cand = 'tanh' + + self.has_initial_state = True + self.is_reverse = True if __name__ == '__main__': From bd680f157fb41177b1f2c3325879d5850505357b Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 26 Oct 2017 19:13:24 +0800 Subject: [PATCH 11/35] fix compiling warning. 
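
Most of these warnings come from mixing signed and unsigned loop indices in
LoDTensor2BatchFunctor, so the LoD size is now cast explicitly and the
offending indices are switched to int. For readers following that functor,
the row reordering it computes can be sketched in a few lines of Python (a
minimal sketch; the seq_info values are made up for illustration, and the
real code also handles the is_reverse case):

# Sequences sorted by descending length; each entry is (start_row, length)
# inside the flattened LoDTensor. For lod = [[0, 2, 6, 9]] the sorted
# sequences are rows 2-5 (length 4), rows 6-8 (length 3), rows 0-1 (length 2).
seq_info = [(2, 4), (6, 3), (0, 2)]
num_batch = max(length for _, length in seq_info)  # longest sequence

seq2batch_idx = []
batch_starts = [0]
for n in range(num_batch):            # one mini "batch" per time step
    for start, length in seq_info:    # longest sequences first
        if n < length:                # sequence still alive at step n
            seq2batch_idx.append(start + n)
    batch_starts.append(len(seq2batch_idx))

print(seq2batch_idx)  # [2, 6, 0, 3, 7, 1, 4, 8, 5]
print(batch_starts)   # [0, 3, 6, 8, 9], where each time step begins

This reproduces the sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] that the earlier
unit tests hard-coded for lod = [[0, 2, 6, 9]].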
--- paddle/operators/lstm_op.h | 4 +- paddle/operators/math/sequence2batch.h | 7 +-- .../paddle/v2/framework/tests/test_lstm_op.py | 46 +++++++------------ 3 files changed, 23 insertions(+), 34 deletions(-) diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index f910e3bc34..d147b84aef 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -155,7 +155,7 @@ class LSTMGradKernel : public framework::OpKernel { auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); auto* hidden_g = ctx.Input(framework::GradVarName("Hidden")); - auto* cell_g = ctx.Input(framework::GradVarName("Cell")); + // auto* cell_g = ctx.Input(framework::GradVarName("Cell")); auto* in_g = ctx.Output(framework::GradVarName("Input")); auto* weight_g = ctx.Output(framework::GradVarName("Weight")); @@ -219,8 +219,8 @@ class LSTMGradKernel : public framework::OpKernel { LoDTensor batch_cell_g; batch_cell_g.mutable_data(out_dims, ctx.GetPlace()); batch_cell_g.set_lod(batch_gate->lod()); - to_batch(device_ctx, *cell_g, batch_cell_g, false); // TODO(qingqing) support the case output cell has gradient. + // to_batch(device_ctx, *cell_g, batch_cell_g, false); zero(device_ctx, &batch_cell_g, static_cast(0.0)); LoDTensor batch_gate_g; diff --git a/paddle/operators/math/sequence2batch.h b/paddle/operators/math/sequence2batch.h index b833a326c8..b1ba35a6d4 100644 --- a/paddle/operators/math/sequence2batch.h +++ b/paddle/operators/math/sequence2batch.h @@ -58,7 +58,8 @@ class LoDTensor2BatchFunctor { if (!is_cal_batch_lod) { auto lods = batch.lod(); PADDLE_ENFORCE_EQ(lods.size(), 2UL); - PADDLE_ENFORCE_EQ(lods[1].size(), lod_tensor.dims()[0]); + PADDLE_ENFORCE_EQ(lods[1].size(), + static_cast(lod_tensor.dims()[0])); CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, lods[1].data(), batch, true); return; @@ -111,10 +112,10 @@ class LoDTensor2BatchFunctor { size_t* batch_starts = batch_lods[0].data(); size_t* seq2batch_idx = batch_lods[1].data(); batch_starts[0] = 0; - for (size_t n = 0; n < num_batch; n++) { + for (int n = 0; n < num_batch; n++) { auto batch_id = static_cast(batch_starts[n]); for (size_t i = 0; i < seq_info.size(); ++i) { - size_t seq_len = seq_info[i].length; + int seq_len = seq_info[i].length; int start = seq_info[i].start; if (n < seq_len) { seq2batch_idx[batch_id] = diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py index e10972bb3a..7f428cd617 100644 --- a/python/paddle/v2/framework/tests/test_lstm_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_op.py @@ -52,7 +52,7 @@ def lstm( g = np.dot(h_pre, w_h) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) - c_tmp, g_i, g_f, g_o = np.split(g, 4, axis=1) + c, g_i, g_f, g_o = np.split(g, 4, axis=1) if w_c is None: g_i = act_gate(g_i) # 1 x D g_f = act_gate(g_f) # 1 x D @@ -60,7 +60,7 @@ def lstm( w_ic, w_fc, w_oc = np.split(w_c, 3, axis=1) g_i = act_gate(g_i + w_ic * c_pre) # 1 x D g_f = act_gate(g_f + w_fc * c_pre) # 1 x D - c = g_f * c_pre + g_i * act_cand(c_tmp) # 1 x D + c = g_f * c_pre + g_i * act_cand(c) # 1 x D if w_c is None: g_o = act_gate(g_o) # 1 x D @@ -68,8 +68,7 @@ def lstm( _, _, w_oc = np.split(w_c, 3, axis=1) g_o = act_gate(g_o + w_oc * c) # 1 x D h = g_o * act_cell(c) - bg = np.concatenate((act_cand(c_tmp), g_i, g_f, g_o), axis=1) - return h, c, bg + return h, c def _reverse(x, lod): y = np.zeros_like(x) @@ -82,7 +81,6 @@ def lstm( batch_size = len(offset) - 1 hidden = [] cell = [] - gate = [] input = _reverse(input, offset) if is_reverse 
else input if w_b is not None: input = input + np.tile(w_b, (offset[-1], 1)) @@ -94,30 +92,26 @@ def lstm( c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step - h_pre, c_pre, g_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate, - act_cell, act_cand) + h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate, + act_cell, act_cand) hidden.append(h_pre.flatten()) cell.append(c_pre.flatten()) - gate.append(g_pre.flatten()) hidden = np.array(hidden).astype('float64') cell = np.array(cell).astype('float64') - gate = np.array(gate).astype('float64') hidden = _reverse(hidden, offset) if is_reverse else hidden cell = _reverse(cell, offset) if is_reverse else cell - assert gate.shape == input.shape assert hidden.shape == (input.shape[0], input.shape[1] / 4) assert cell.shape == (input.shape[0], input.shape[1] / 4) - return hidden, cell, gate + return hidden, cell class TestLstmOp(OpTest): def set_argument(self): - self.lod = [[0, 2, 6, 9]] + self.lod = [[0, 2, 6]] self.D = 16 - self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] self.act_gate = 'sigmoid' self.act_cell = 'tanh' @@ -141,22 +135,18 @@ class TestLstmOp(OpTest): w_b = b[:, 0:4 * self.D] w_c = b[:, 4 * self.D:] - h, c, g = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse, - ACTVATION[self.act_gate], ACTVATION[self.act_cell], - ACTVATION[self.act_cand]) - - g_sort = np.zeros_like(x) - for i, j in enumerate(self.sort_idx): - g_sort[i, :] = g[j, :] + h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse, + ACTVATION[self.act_gate], ACTVATION[self.act_cell], + ACTVATION[self.act_cand]) self.inputs = {'Input': (x, self.lod), 'Weight': w, 'Bias': b} - self.inputs['H0'] = h0 - self.inputs['C0'] = c0 + if self.has_initial_state: + self.inputs['H0'] = h0 + self.inputs['C0'] = c0 self.outputs = { 'Hidden': (h, self.lod), 'Cell': (c, self.lod), - 'BatchGate': g_sort, } self.attrs = { 'usePeepholes': True, @@ -179,9 +169,8 @@ class TestLstmOp(OpTest): class TestLstmOpHasNoInitial(TestLstmOp): def set_argument(self): - self.lod = [[0, 2, 6, 9]] - self.D = 64 - self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + self.lod = [[0, 2, 6]] + self.D = 16 self.act_gate = 'sigmoid' self.act_cell = 'tanh' @@ -193,9 +182,8 @@ class TestLstmOpHasNoInitial(TestLstmOp): class TestLstmOpRerverse(TestLstmOp): def set_argument(self): - self.lod = [[0, 2, 6, 9]] - self.D = 64 - self.sort_idx = [2, 6, 0, 3, 7, 1, 4, 8, 5] + self.lod = [[0, 2, 6]] + self.D = 16 self.act_gate = 'sigmoid' self.act_cell = 'tanh' From bce4f7d6eba070e4465ad52d65524e57d3745bae Mon Sep 17 00:00:00 2001 From: caoying03 Date: Thu, 26 Oct 2017 17:41:01 +0800 Subject: [PATCH 12/35] follow comments. 
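
Beyond renaming the Crf classes to CRF and tightening a few messages, this
patch documents why NormalizeL1 exists: the forward recursion multiplies
long chains of positive factors, so each row of alpha is rescaled to sum to
one and the discarded scale is restored in log space. A minimal numpy sketch
of the same rescaling trick (start/end transitions omitted for brevity;
x_exps and w_exps stand for the exponentiated emission and transition
scores):

import numpy as np

def log_partition(x_exps, w_exps):
    # x_exps: (T, D) exponentiated emissions; w_exps: (D, D) exponentiated
    # pairwise transition weights. Returns log(Z) for one linear chain.
    alpha = x_exps[0].copy()
    log_z = 0.0
    for k in range(1, len(x_exps)):
        s = alpha.sum()        # NormalizeL1: divide out the row sum ...
        log_z += np.log(s)     # ... and put it back in log space
        alpha = x_exps[k] * ((alpha / s) @ w_exps)
    return log_z + np.log(alpha.sum())

Without the per-step division, alpha would underflow for long sequences; the
accumulated logs make the final answer identical.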
--- paddle/framework/tensor_impl.h | 5 ++- paddle/operators/linear_chain_crf_op.cc | 57 +++++++++++++------------ paddle/operators/linear_chain_crf_op.h | 4 +- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 9090ff9532..4097f92e02 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -228,8 +228,9 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_GE(begin_idx, 0, "The start row index must be greater than 0."); PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound."); - PADDLE_ENFORCE_LT(begin_idx, end_idx, - "The start row index must be less than the end row index."); + PADDLE_ENFORCE_LT( + begin_idx, end_idx, + "The start row index must be smaller than the end row index."); if (dims_[0] == 1) { return *this; diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index d13d4829d9..0f21ee7264 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -26,9 +26,10 @@ T NormalizeL1(T* x, size_t len) { // Right now, we just bet that sum won't be zero. If this really happens, we // will figure out what should be done then. PADDLE_ENFORCE(sum, - "The unnormalized probabilites of all possible unfinished " + "The unnormalized probabilities of all possible unfinished " "sequences must be greater than 0."); - for (size_t i = 0; i < len; ++i) x[i] /= sum; + T s = 1. / sum; + for (size_t i = 0; i < len; ++i) x[i] *= s; return sum; } } // namespace @@ -36,9 +37,9 @@ T NormalizeL1(T* x, size_t len) { using framework::LoDTensor; using framework::LoD; -class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker { +class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: - LinearChainCrfOpMaker(framework::OpProto* proto, + LinearChainCRFOpMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput( @@ -51,11 +52,11 @@ class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker { AddInput( "Transition", "(Tensor, default: Tensor). A Tensor with shape [(D + 2) x D]. " - "The learnable parameter for linear_chain_crf operator. " + "The learnable parameter for the linear_chain_crf operator. " "See more details in the operator's comments."); AddInput( "Label", - "(LoDTensor, default: LoDTensor). The ground truth which is a 2-D " + "(LoDTensor, default: LoDTensor). The groundtruth which is a 2-D " "LoDTensor with shape [N x 1], where N is the total element number in " "a mini-batch."); AddOutput( @@ -82,14 +83,11 @@ class LinearChainCrfOpMaker : public framework::OpProtoAndCheckerMaker { .AsIntermediate(); AddOutput( "LogLikelihood", - "(Tensor, default: Tensor). The logarithm of the " - "conditional " + "(Tensor, default: Tensor). The logarithm of the conditional " "likelihood of each training sample in a mini-batch. This is a 2-D " "tensor with shape [S x 1], where S is the sequence number in a " - "mini-batch. " - "Note: S is equal to the sequence number in a mini-batch. The " - "output " - "is no longer a LoDTensor."); + "mini-batch. Note: S is equal to the sequence number in a mini-batch. " + "The output is no longer a LoDTensor."); AddComment(R"DOC( Conditional Random Field defines an undirected probabilistic graph with nodes denoting random variables and edges denoting dependencies between these @@ -100,11 +98,11 @@ variables. 
CRF learns the conditional probability \f$P(Y|X)\f$, where Linear chain CRF is a special case of CRF that is useful for sequence labeling task. Sequence labeling tasks do not assume a lot of conditional independences among inputs. They only concern about the input and the output -being linear sequences. Thus, the graph model of CRF is a simple chain or -a line, which results in a linear chain CRF. +being linear sequences. Thus, the graph model of such a CRF is a simple chain +or a line, which results in the linear chain CRF. -This operator implements the Forward-Backward algorithm for linear chain CRF. -Please see http://www.cs.columbia.edu/~mcollins/fb.pdf for reference. +This operator implements the Forward-Backward algorithm for the linear chain +CRF. Please see http://www.cs.columbia.edu/~mcollins/fb.pdf for reference. Equation: @@ -144,7 +142,7 @@ nonlinear activation. } }; -class LinearChainCrfOp : public framework::OperatorWithKernel { +class LinearChainCRFOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -211,7 +209,7 @@ class LinearChainCrfOp : public framework::OperatorWithKernel { }; template -class LinearChainCrfOpKernel +class LinearChainCRFOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { @@ -262,11 +260,11 @@ class LinearChainCrfOpKernel w_exps.device(place) = w.exp(); auto* alpha = ctx.Output("Alpha"); - alpha->mutable_data(ctx.GetPlace()); + alpha->mutable_data(platform::CPUPlace()); auto* ll = ctx.Output("LogLikelihood"); // resize the output tensor to the correct dimension. ll->Resize({static_cast(seq_num), 1}); - T* log_likelihood = ll->mutable_data(ctx.GetPlace()); + T* log_likelihood = ll->mutable_data(platform::CPUPlace()); for (size_t i = 0; i < seq_num; ++i) { int start_pos = static_cast(in_lod[level][i]); int end_pos = static_cast(in_lod[level][i + 1]); @@ -322,6 +320,7 @@ class LinearChainCrfOpKernel } alpha_value[k * tag_num + i] = x_exps[k * tag_num + i] * sum; } + // NormalizeL1 is to avoid underflow or overflow at (*). ll -= x_row_max[k] + std::log(NormalizeL1(alpha_value + k * tag_num, tag_num)); } @@ -330,6 +329,7 @@ class LinearChainCrfOpKernel sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps[tag_num + i]; } ll -= std::log(sum); + // Now ll is equal to -log(Z). const int* lbl = label->data(); PADDLE_ENFORCE_LT( @@ -347,7 +347,7 @@ class LinearChainCrfOpKernel } }; -class LinearChainCrfGradOp : public framework::OperatorWithKernel { +class LinearChainCRFGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -407,11 +407,11 @@ class LinearChainCrfGradOp : public framework::OperatorWithKernel { }; template -class LinearChainCrfGradOpKernel +class LinearChainCRFGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), + PADDLE_ENFORCE(platform::is_cpu_place(platform::CPUPlace()), "This kernel only runs on CPU."); auto* label = ctx.Input("Label"); auto* emission_exps = ctx.Input("EmissionExps"); @@ -493,6 +493,7 @@ class LinearChainCrfGradOpKernel } beta_value[k * tag_num + i] = sum; } + // NormalizeL1 is to avoid underflow or overflow at (**). 
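 // Rescaling each row of beta is safe for the same reason it is in the
 // forward pass: the emission gradient below divides alpha * beta by its
 // per-row sum, so any per-row scale factor cancels out, and the
 // transition gradient renormalizes per step at (**) as well.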
NormalizeL1(beta_value + k * tag_num, tag_num); } @@ -534,7 +535,7 @@ class LinearChainCrfGradOpKernel T sum = 0.; for (size_t i = 0; i < tag_num; ++i) { for (size_t j = 0; j < tag_num; ++j) { - sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * + sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * // (**) alpha_mat(k - 1, i) * tmp_mat(k, j); } } @@ -557,11 +558,11 @@ class LinearChainCrfGradOpKernel } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP(linear_chain_crf, ops::LinearChainCrfOp, ops::LinearChainCrfOpMaker, - linear_chain_crf_grad, ops::LinearChainCrfGradOp); +REGISTER_OP(linear_chain_crf, ops::LinearChainCRFOp, ops::LinearChainCRFOpMaker, + linear_chain_crf_grad, ops::LinearChainCRFGradOp); REGISTER_OP_CPU_KERNEL( linear_chain_crf, - ops::LinearChainCrfOpKernel); + ops::LinearChainCRFOpKernel); REGISTER_OP_CPU_KERNEL( linear_chain_crf_grad, - ops::LinearChainCrfGradOpKernel); + ops::LinearChainCRFGradOpKernel); diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index f65d268bb6..3175252c66 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -25,7 +25,7 @@ template ; template -class LinearChainCrfOpKernel : public framework::OpKernel { +class LinearChainCRFOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override; @@ -37,7 +37,7 @@ class LinearChainCrfOpKernel : public framework::OpKernel { }; template -class LinearChainCrfGradOpKernel : public framework::OpKernel { +class LinearChainCRFGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override; From 3afb9dc88a8d022e3a96ae9a45db84918c521957 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Fri, 27 Oct 2017 11:38:07 +0800 Subject: [PATCH 13/35] use double in unittest. --- paddle/operators/linear_chain_crf_op.cc | 10 +++++----- .../framework/tests/test_linear_chain_crf_op.py | 16 +++++----------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 0f21ee7264..9caa2dc742 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -195,8 +195,6 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { // is the sequence number in a mini-batch. The dimension set here should be // resized to its correct size in the function Compute. ctx->SetOutputDim("LogLikelihood", {emission_dims[0], 1}); - - ctx->ShareLoD("Emission", /*->*/ "EmissionExps"); } protected: @@ -402,7 +400,7 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { // operator is determined by its input "EmissionExps". 
framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("EmissionExps")->type()); + return framework::ToDataType(ctx.Input("LogLikelihood")->type()); } }; @@ -562,7 +560,9 @@ REGISTER_OP(linear_chain_crf, ops::LinearChainCRFOp, ops::LinearChainCRFOpMaker, linear_chain_crf_grad, ops::LinearChainCRFGradOp); REGISTER_OP_CPU_KERNEL( linear_chain_crf, - ops::LinearChainCRFOpKernel); + ops::LinearChainCRFOpKernel, + ops::LinearChainCRFOpKernel); REGISTER_OP_CPU_KERNEL( linear_chain_crf_grad, - ops::LinearChainCRFGradOpKernel); + ops::LinearChainCRFGradOpKernel, + ops::LinearChainCRFGradOpKernel); diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py index 4d0cac2ad3..1cc6dc1aaa 100644 --- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py +++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py @@ -32,7 +32,7 @@ class LinearChainCrfForward(object): # alpha is a memo table in dynamic programming to caculate # nomalization factor. self.alpha = np.zeros( - (seq_start_positions[-1], self.tag_num), dtype="float32") + (seq_start_positions[-1], self.tag_num), dtype="float64") self.log_likelihood = np.zeros((self.seq_num, 1)) def _l1_norm(self, x): @@ -92,12 +92,12 @@ class TestLinearChainCrfOp(OpTest): for i in range(SEQ_NUM): lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) emission = np.random.uniform(-1, 1, - [lod[-1][-1], TAG_NUM]).astype("float32") + [lod[-1][-1], TAG_NUM]).astype("float64") emission_row_max = np.amax(emission, axis=1, keepdims=True) emission_exps = np.exp(emission - emission_row_max) transition = np.random.uniform(-0.5, 0.5, - [TAG_NUM + 2, TAG_NUM]).astype("float32") + [TAG_NUM + 2, TAG_NUM]).astype("float64") transition_exps = np.exp(transition) labels = np.random.randint( @@ -128,17 +128,11 @@ class TestLinearChainCrfOp(OpTest): self.check_output() def test_check_grad(self): - self.check_grad( - ["Emission", "Transition"], - "LogLikelihood", - max_relative_error=0.05) + self.check_grad(["Emission", "Transition"], "LogLikelihood") def test_check_grad_ignore_transition(self): self.check_grad( - ["Emission"], - "LogLikelihood", - max_relative_error=0.05, - no_grad_set=set("Transition")) + ["Emission"], "LogLikelihood", no_grad_set=set("Transition")) if __name__ == "__main__": From cca383cfba49fcf9b9a137922c4112623a80bc28 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Fri, 27 Oct 2017 13:35:39 +0800 Subject: [PATCH 14/35] follow comments. --- paddle/operators/linear_chain_crf_op.cc | 324 +----------------------- paddle/operators/linear_chain_crf_op.h | 297 +++++++++++++++++++++- 2 files changed, 295 insertions(+), 326 deletions(-) diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 9caa2dc742..65bbfff0f8 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -17,26 +17,6 @@ limitations under the License. */ namespace paddle { namespace operators { -namespace { -template -T NormalizeL1(T* x, size_t len) { - T sum = 0.; - for (size_t i = 0; i < len; ++i) sum += x[i]; - // (This comment is from the old LinearChainCRFLayer.) - // Right now, we just bet that sum won't be zero. If this really happens, we - // will figure out what should be done then. - PADDLE_ENFORCE(sum, - "The unnormalized probabilities of all possible unfinished " - "sequences must be greater than 0."); - T s = 1. 
/ sum; - for (size_t i = 0; i < len; ++i) x[i] *= s; - return sum; -} -} // namespace - -using framework::LoDTensor; -using framework::LoD; - class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: LinearChainCRFOpMaker(framework::OpProto* proto, @@ -206,145 +186,6 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { } }; -template -class LinearChainCRFOpKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "This kernel only runs on CPU."); - auto* emission_weights = ctx.Input("Emission"); - auto* transition_weights = ctx.Input("Transition"); - auto* emission_exps = ctx.Output("EmissionExps"); - emission_exps->mutable_data(platform::CPUPlace()); - auto* transition_exps = ctx.Output("TransitionExps"); - transition_exps->mutable_data(platform::CPUPlace()); - auto* label = ctx.Input("Label"); - - auto in_lod = emission_weights->lod(); - PADDLE_ENFORCE(in_lod.size(), "Input(Emission) is not a sequence."); - - // TODO(caoying) The checks related to LoD information should be - // moved into InferShape once after the InferShape is refactored. - PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL, - "The Input(Emission) should be a sequence."); - PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL, - "The Input(Label) should be a sequence."); - const size_t level = 0; - - auto emission_dims = emission_weights->dims(); - const size_t batch_size = emission_dims[0]; - const size_t tag_num = emission_dims[1]; - const size_t seq_num = in_lod[level].size() - 1; - - Tensor emission_row_max; - emission_row_max.mutable_data( - framework::make_ddim({static_cast(batch_size), 1}), - platform::CPUPlace()); - - auto place = ctx.GetEigenDevice(); - auto x = EigenMatrix::From(*emission_weights); - auto x_row_max = EigenMatrix::From(emission_row_max); - x_row_max.device(place) = - x.maximum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(int(batch_size), 1)); - - auto x_exps = EigenMatrix::From(*emission_exps); - x_exps.device(place) = - (x - x_row_max.broadcast(Eigen::DSizes(1, tag_num))).exp(); - - auto w = EigenMatrix::From(*transition_weights); - auto w_exps = EigenMatrix::From(*transition_exps); - w_exps.device(place) = w.exp(); - - auto* alpha = ctx.Output("Alpha"); - alpha->mutable_data(platform::CPUPlace()); - auto* ll = ctx.Output("LogLikelihood"); - // resize the output tensor to the correct dimension. - ll->Resize({static_cast(seq_num), 1}); - T* log_likelihood = ll->mutable_data(platform::CPUPlace()); - for (size_t i = 0; i < seq_num; ++i) { - int start_pos = static_cast(in_lod[level][i]); - int end_pos = static_cast(in_lod[level][i + 1]); - if (end_pos == start_pos) { - // If an empty input sequence is given, pad 0 for its cost. 
- log_likelihood[i] = 0.; - continue; - } - - const Tensor one_seq = emission_weights->Slice(start_pos, end_pos); - Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos); - Tensor one_seq_exps = emission_exps->Slice(start_pos, end_pos); - const Tensor one_seq_label = label->Slice(start_pos, end_pos); - Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); - - log_likelihood[i] = ForwardOneSequence( - &one_seq, &one_seq_row_max, &one_seq_exps, transition_weights, - transition_exps, &one_seq_label, &one_seq_alpha); - } - } - - protected: - T ForwardOneSequence(const Tensor* emission, const Tensor* emission_row_max, - const Tensor* emission_exps, const Tensor* trans_weights, - const Tensor* trans_weight_exps, const Tensor* label, - Tensor* alpha) const { - const T* x = emission->data(); - const T* x_row_max = emission_row_max->data(); - const T* x_exps = emission_exps->data(); - const T* w = trans_weights->data(); - const T* w_exps = trans_weight_exps->data(); - T* alpha_value = alpha->data(); - - auto x_dims = emission->dims(); - const size_t seq_length = x_dims[0]; - const size_t tag_num = x_dims[1]; - // The 1st row of w are transition weights for start mask. - // The 2nd row of w are transition weights for end mask. - // Transition weights among other tags begin from the 3rd row of w. - const size_t state_trans_base_idx = 2; - - for (size_t i = 0; i < tag_num; ++i) { - alpha_value[i] = w_exps[i] * x_exps[i]; - } - T ll = -x_row_max[0] - std::log(NormalizeL1(alpha_value, tag_num)); - - for (size_t k = 1; k < seq_length; ++k) { - for (size_t i = 0; i < tag_num; ++i) { - T sum = 0.; - for (size_t j = 0; j < tag_num; ++j) { - sum += alpha_value[(k - 1) * tag_num + j] * - w_exps[(j + state_trans_base_idx) * tag_num + i]; - } - alpha_value[k * tag_num + i] = x_exps[k * tag_num + i] * sum; - } - // NormalizeL1 is to avoid underflow or overflow at (*). - ll -= x_row_max[k] + - std::log(NormalizeL1(alpha_value + k * tag_num, tag_num)); - } - T sum = 0.; - for (size_t i = 0; i < tag_num; ++i) { - sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps[tag_num + i]; - } - ll -= std::log(sum); - // Now ll is equal to -log(Z). - - const int* lbl = label->data(); - PADDLE_ENFORCE_LT( - *std::max_element(lbl, lbl + seq_length), tag_num, - "An invalid tag label that execesses the largest tag number."); - - // Calculate the nominator part, which depends on the label sequence. 
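 // In other words, log p(y|x) = score(x, y) - log(Z): the path score adds
 // the start transition, each step's emission, the pairwise transitions,
 // and the end transition, while -log(Z) has already been accumulated
 // into ll above.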
- ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] + - w[tag_num + lbl[seq_length - 1]] /*end transition*/; - for (size_t k = 1; k < seq_length; ++k) { - ll += x[k * tag_num + lbl[k]] + - w[(lbl[k - 1] + state_trans_base_idx) * tag_num + lbl[k]]; - } - return -ll; - } -}; - class LinearChainCRFGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -357,11 +198,6 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("LogLikelihood")), "Input(LogLikelihood@GRAD) shoudl be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Emission")), - "Output(Emission@GRAD) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Transition")), - "Output(Transition@GRAD) should be not null."); - auto emission_exps_dims = ctx->GetInputDim("EmissionExps"); PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2UL, "The Input(EmissionExps) should be a 2-D tensor."); @@ -390,168 +226,24 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { "The height of Input(EmissionExps) and the height of Input(Label) " "should be the same."); - ctx->SetOutputDim(framework::GradVarName("Emission"), emission_exps_dims); - ctx->SetOutputDim(framework::GradVarName("Transition"), - transition_exps_dims); + if (ctx->HasOutput(framework::GradVarName("Emission"))) { + ctx->SetOutputDim(framework::GradVarName("Emission"), emission_exps_dims); + } + if (ctx->HasOutput(framework::GradVarName("Transition"))) { + ctx->SetOutputDim(framework::GradVarName("Transition"), + transition_exps_dims); + } } protected: // Explicitly set that the data type of output of the linear_chain_crf_grad - // operator is determined by its input "EmissionExps". + // operator is determined by its input: graidents of LogLikelihood. framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType(ctx.Input("LogLikelihood")->type()); } }; -template -class LinearChainCRFGradOpKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(platform::CPUPlace()), - "This kernel only runs on CPU."); - auto* label = ctx.Input("Label"); - auto* emission_exps = ctx.Input("EmissionExps"); - auto* transition_exps = ctx.Input("TransitionExps"); - auto* alpha = ctx.Input("Alpha"); - const T* ll_grad = - ctx.Input(framework::GradVarName("LogLikelihood"))->data(); - - auto* emission_grad = - ctx.Output(framework::GradVarName("Emission")); - emission_grad->mutable_data(platform::CPUPlace()); - - auto* trans_grad = ctx.Output(framework::GradVarName("Transition")); - if (trans_grad) trans_grad->mutable_data(platform::CPUPlace()); - - auto emission_dims = emission_exps->dims(); - - // Beta is the memo table used in dynamic programming to calculate the - // backwark vectors. For a backward vector i (the i-th row of beta), it - // captures the unnormalized probabilities of partial sequences starting at - // position i. - Tensor beta; - beta.mutable_data(emission_dims, platform::CPUPlace()); - - const size_t level = 0; // currently, only support sequence. 
- auto lod = label->lod(); - PADDLE_ENFORCE(lod.size(), "Input(Label) is not a sequence."); - - for (size_t i = 0; i < lod[level].size() - 1; ++i) { - int start_pos = static_cast(lod[level][i]); - int end_pos = static_cast(lod[level][i + 1]); - if (end_pos == start_pos) continue; - - const Tensor one_seq_emission_exps = - emission_exps->Slice(start_pos, end_pos); - const Tensor one_seq_label = label->Slice(start_pos, end_pos); - const Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); - Tensor one_seq_beta = beta.Slice(start_pos, end_pos); - Tensor one_seq_emission_grad = emission_grad->Slice(start_pos, end_pos); - - BackwardOneSequence(ctx.device_context(), ll_grad[i], - &one_seq_emission_exps, transition_exps, - &one_seq_alpha, &one_seq_label, &one_seq_beta, - trans_grad, &one_seq_emission_grad); - } - } - - protected: - void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, - const Tensor* emission_exps, - const Tensor* transition_exps, const Tensor* alpha, - const Tensor* label, Tensor* beta, - Tensor* transition_grad, - Tensor* emission_grad) const { - const T* w_exps = transition_exps->data(); - const T* x_exps = emission_exps->data(); - const int* label_value = label->data(); - T* beta_value = beta->data(); - - auto x_dims = emission_exps->dims(); - const size_t seq_length = x_dims[0]; - const size_t tag_num = x_dims[1]; - const size_t state_trans_base_idx = 2; - - // Calculate the backward vectors: beta. - // First, calculate the initialition state. - for (size_t i = 0; i < tag_num; ++i) { - beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i]; - } - NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num); - - for (int k = static_cast(seq_length) - 2; k >= 0; --k) { - for (size_t i = 0; i < tag_num; ++i) { - T sum = 0.; - for (size_t j = 0; j < tag_num; ++j) { - sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * - x_exps[(k + 1) * tag_num + j] * - beta_value[(k + 1) * tag_num + j]; - } - beta_value[k * tag_num + i] = sum; - } - // NormalizeL1 is to avoid underflow or overflow at (**). - NormalizeL1(beta_value + k * tag_num, tag_num); - } - - auto alpha_mat = EigenMatrix::From(*alpha); - auto beta_mat = EigenMatrix::From(*beta); - auto x_grad_mat = EigenMatrix::From(*emission_grad); - auto* place = ctx.GetEigenDevice(); - auto prob = alpha_mat * beta_mat; - auto row_sum = prob.sum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(seq_length, 1)) - .broadcast(Eigen::DSizes(1, tag_num)); - x_grad_mat.device(*place) = prob / row_sum; - - for (size_t k = 0; k < seq_length; ++k) { - x_grad_mat(k, label_value[k]) -= static_cast(1.); - } - - if (transition_grad) { - T* trans_grad = transition_grad->data(); - for (size_t k = 0; k < tag_num; ++k) { - trans_grad[k] += x_grad_mat(/*from start state*/ 0, k); - trans_grad[tag_num + k] += - x_grad_mat(/*to end state*/ seq_length - 1, k); - } - - auto x_exps_mat = EigenMatrix::From(*emission_exps); - - // TODO(caoying): Fix this to avoid using this local variable. 
- Tensor tmp; - tmp.mutable_data(beta->dims(), platform::CPUPlace()); - auto tmp_mat = EigenMatrix::From(tmp); - auto prob = beta_mat * x_exps_mat; - auto row_sum = prob.sum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(seq_length, 1)) - .broadcast(Eigen::DSizes(1, tag_num)); - tmp_mat.device(*place) = prob / row_sum; - - for (size_t k = 1; k < seq_length; ++k) { - T sum = 0.; - for (size_t i = 0; i < tag_num; ++i) { - for (size_t j = 0; j < tag_num; ++j) { - sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * // (**) - alpha_mat(k - 1, i) * tmp_mat(k, j); - } - } - sum = 1. / sum; - for (size_t i = 0; i < tag_num; ++i) { - for (size_t j = 0; j < tag_num; ++j) { - trans_grad[(i + state_trans_base_idx) * tag_num + j] += - sum * w_exps[(i + state_trans_base_idx) * tag_num + j] * - alpha_mat(k - 1, i) * tmp_mat(k, j); - } - } - trans_grad[(label_value[k - 1] + state_trans_base_idx) * tag_num + - label_value[k]] -= static_cast(1.); - } - } - } -}; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index 3175252c66..f028b6554e 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -19,6 +19,25 @@ limitations under the License. */ namespace paddle { namespace operators { +namespace { +template +T NormalizeL1(T* x, size_t len) { + T sum = 0.; + for (size_t i = 0; i < len; ++i) sum += x[i]; + // (This comment is from the old LinearChainCRFLayer.) + // Right now, we just bet that sum won't be zero. If this really happens, we + // will figure out what should be done then. + PADDLE_ENFORCE(sum, + "The unnormalized probabilities of all possible unfinished " + "sequences must be greater than 0."); + T s = 1. / sum; + for (size_t i = 0; i < len; ++i) x[i] *= s; + return sum; +} +} // namespace + +using framework::LoDTensor; +using framework::LoD; using framework::Tensor; template @@ -27,27 +46,285 @@ using EigenMatrix = framework::EigenMatrix; template class LinearChainCRFOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override; + void Compute(const framework::ExecutionContext& ctx) const override { + auto* emission_weights = ctx.Input("Emission"); + auto* transition_weights = ctx.Input("Transition"); + auto* emission_exps = ctx.Output("EmissionExps"); + emission_exps->mutable_data(ctx.GetPlace()); + auto* transition_exps = ctx.Output("TransitionExps"); + transition_exps->mutable_data(ctx.GetPlace()); + auto* label = ctx.Input("Label"); + + auto in_lod = emission_weights->lod(); + PADDLE_ENFORCE(in_lod.size(), "Input(Emission) is not a sequence."); + + // TODO(caoying) The checks related to LoD information should be + // moved into InferShape once after the InferShape is refactored. 
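 // A single LoD level means the input is a flat batch of sequences whose
 // absolute row offsets live in lod[0]; the per-sequence Slice calls
 // further below rely on exactly that layout.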
+ PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL, + "The Input(Emission) should be a sequence."); + PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL, + "The Input(Label) should be a sequence."); + const size_t level = 0; + + auto emission_dims = emission_weights->dims(); + const size_t batch_size = emission_dims[0]; + const size_t tag_num = emission_dims[1]; + const size_t seq_num = in_lod[level].size() - 1; + + Tensor emission_row_max; + emission_row_max.mutable_data( + framework::make_ddim({static_cast(batch_size), 1}), + ctx.GetPlace()); + + auto place = ctx.GetEigenDevice(); + auto x = EigenMatrix::From(*emission_weights); + auto x_row_max = EigenMatrix::From(emission_row_max); + x_row_max.device(place) = + x.maximum(Eigen::DSizes(1)) + .reshape(Eigen::DSizes(int(batch_size), 1)); + + auto x_exps = EigenMatrix::From(*emission_exps); + x_exps.device(place) = + (x - x_row_max.broadcast(Eigen::DSizes(1, tag_num))).exp(); + + auto w = EigenMatrix::From(*transition_weights); + auto w_exps = EigenMatrix::From(*transition_exps); + w_exps.device(place) = w.exp(); + + auto* alpha = ctx.Output("Alpha"); + alpha->mutable_data(ctx.GetPlace()); + auto* ll = ctx.Output("LogLikelihood"); + // resize the output tensor to the correct dimension. + ll->Resize({static_cast(seq_num), 1}); + T* log_likelihood = ll->mutable_data(ctx.GetPlace()); + for (size_t i = 0; i < seq_num; ++i) { + int start_pos = static_cast(in_lod[level][i]); + int end_pos = static_cast(in_lod[level][i + 1]); + if (end_pos == start_pos) { + // If an empty input sequence is given, pad 0 for its cost. + log_likelihood[i] = 0.; + continue; + } + + const Tensor one_seq = emission_weights->Slice(start_pos, end_pos); + Tensor one_seq_row_max = emission_row_max.Slice(start_pos, end_pos); + Tensor one_seq_exps = emission_exps->Slice(start_pos, end_pos); + const Tensor one_seq_label = label->Slice(start_pos, end_pos); + Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); + + log_likelihood[i] = ForwardOneSequence( + one_seq, one_seq_row_max, one_seq_exps, *transition_weights, + *transition_exps, one_seq_label, &one_seq_alpha); + } + }; protected: - T ForwardOneSequence(const Tensor* emission, const Tensor* emission_row_max, - const Tensor* emission_exps, const Tensor* trans_weights, - const Tensor* trans_weight_exps, const Tensor* label, - Tensor* alpha) const; + T ForwardOneSequence(const Tensor& emission, const Tensor& emission_row_max, + const Tensor& emission_exps, const Tensor& trans_weights, + const Tensor& trans_weight_exps, const Tensor& label, + Tensor* alpha) const { + const T* x = emission.data(); + const T* x_row_max = emission_row_max.data(); + const T* x_exps = emission_exps.data(); + const T* w = trans_weights.data(); + const T* w_exps = trans_weight_exps.data(); + T* alpha_value = alpha->data(); + + auto x_dims = emission.dims(); + const size_t seq_length = x_dims[0]; + const size_t tag_num = x_dims[1]; + // The 1st row of w are transition weights for start mask. + // The 2nd row of w are transition weights for end mask. + // Transition weights between other tags begin from the 3rd row of w. 
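 // Layout of w, with D = tag_num:
 //   w[0 * D + j]        start -> tag j
 //   w[1 * D + j]        tag j -> end
 //   w[(2 + i) * D + j]  tag i -> tag j, hence state_trans_base_idx = 2.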
+ const size_t state_trans_base_idx = 2; + + for (size_t i = 0; i < tag_num; ++i) { + alpha_value[i] = w_exps[i] * x_exps[i]; + } + T ll = -x_row_max[0] - std::log(NormalizeL1(alpha_value, tag_num)); + + for (size_t k = 1; k < seq_length; ++k) { + for (size_t i = 0; i < tag_num; ++i) { + T sum = 0.; + for (size_t j = 0; j < tag_num; ++j) { + sum += alpha_value[(k - 1) * tag_num + j] * + w_exps[(j + state_trans_base_idx) * tag_num + i]; + } + alpha_value[k * tag_num + i] = x_exps[k * tag_num + i] * sum; + } + // NormalizeL1 is to avoid underflow or overflow at (*). + ll -= x_row_max[k] + + std::log(NormalizeL1(alpha_value + k * tag_num, tag_num)); + } + T sum = 0.; + for (size_t i = 0; i < tag_num; ++i) { + sum += alpha_value[(seq_length - 1) * tag_num + i] * w_exps[tag_num + i]; + } + ll -= std::log(sum); + // Now ll is equal to -log(Z). + + const int* lbl = label.data(); + PADDLE_ENFORCE_LT( + *std::max_element(lbl, lbl + seq_length), tag_num, + "An invalid tag label that execesses the largest tag number."); + + // Calculate the nominator part, which depends on the label sequence. + ll += w[lbl[0]] /*start transition*/ + x[lbl[0]] + + w[tag_num + lbl[seq_length - 1]] /*end transition*/; + for (size_t k = 1; k < seq_length; ++k) { + ll += x[k * tag_num + lbl[k]] + + w[(lbl[k - 1] + state_trans_base_idx) * tag_num + lbl[k]]; + } + return -ll; + }; }; template class LinearChainCRFGradOpKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override; + void Compute(const framework::ExecutionContext& ctx) const override { + auto* label = ctx.Input("Label"); + auto* emission_exps = ctx.Input("EmissionExps"); + auto* transition_exps = ctx.Input("TransitionExps"); + auto* alpha = ctx.Input("Alpha"); + const T* ll_grad = + ctx.Input(framework::GradVarName("LogLikelihood"))->data(); + + auto place = ctx.GetPlace(); + auto* emission_grad = + ctx.Output(framework::GradVarName("Emission")); + emission_grad->mutable_data(place); + + auto* trans_grad = ctx.Output(framework::GradVarName("Transition")); + if (trans_grad) { + trans_grad->mutable_data(place); + } + + auto emission_dims = emission_exps->dims(); + + // Beta is the memo table used in dynamic programming to calculate the + // backwark vectors. For a backward vector i (the i-th row of beta), it + // captures the unnormalized probabilities of partial sequences starting at + // position i. + Tensor beta; + beta.mutable_data(emission_dims, place); + + const size_t level = 0; // currently, only support sequence. 
+ auto lod = label->lod(); + PADDLE_ENFORCE(lod.size(), "Input(Label) is not a sequence."); + + for (size_t i = 0; i < lod[level].size() - 1; ++i) { + int start_pos = static_cast(lod[level][i]); + int end_pos = static_cast(lod[level][i + 1]); + if (end_pos == start_pos) continue; + + const Tensor one_seq_emission_exps = + emission_exps->Slice(start_pos, end_pos); + const Tensor one_seq_label = label->Slice(start_pos, end_pos); + const Tensor one_seq_alpha = alpha->Slice(start_pos, end_pos); + Tensor one_seq_beta = beta.Slice(start_pos, end_pos); + Tensor one_seq_emission_grad = emission_grad->Slice(start_pos, end_pos); + + BackwardOneSequence(ctx.device_context(), ll_grad[i], + one_seq_emission_exps, *transition_exps, + one_seq_alpha, one_seq_label, &one_seq_beta, + trans_grad, &one_seq_emission_grad); + } + }; protected: void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, - const Tensor* emission_exps, - const Tensor* transition_exps, const Tensor* alpha, - const Tensor* label, Tensor* beta, + const Tensor& emission_exps, + const Tensor& transition_exps, const Tensor& alpha, + const Tensor& label, Tensor* beta, Tensor* transition_grad, - Tensor* emission_grad) const; + Tensor* emission_grad) const { + const T* w_exps = transition_exps.data(); + const T* x_exps = emission_exps.data(); + const int* label_value = label.data(); + T* beta_value = beta->data(); + + auto x_dims = emission_exps.dims(); + const size_t seq_length = x_dims[0]; + const size_t tag_num = x_dims[1]; + const size_t state_trans_base_idx = 2; + + // Calculate the backward vectors: beta. + // First, calculate the initialition state. + for (size_t i = 0; i < tag_num; ++i) { + beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i]; + } + NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num); + + for (int k = static_cast(seq_length) - 2; k >= 0; --k) { + for (size_t i = 0; i < tag_num; ++i) { + T sum = 0.; + for (size_t j = 0; j < tag_num; ++j) { + sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * + x_exps[(k + 1) * tag_num + j] * + beta_value[(k + 1) * tag_num + j]; + } + beta_value[k * tag_num + i] = sum; + } + // NormalizeL1 is to avoid underflow or overflow at (**). + NormalizeL1(beta_value + k * tag_num, tag_num); + } + + auto alpha_mat = EigenMatrix::From(alpha); + auto beta_mat = EigenMatrix::From(*beta); + auto x_grad_mat = EigenMatrix::From(*emission_grad); + auto* place = ctx.GetEigenDevice(); + auto prob = alpha_mat * beta_mat; + auto row_sum = prob.sum(Eigen::DSizes(1)) + .reshape(Eigen::DSizes(seq_length, 1)) + .broadcast(Eigen::DSizes(1, tag_num)); + x_grad_mat.device(*place) = prob / row_sum; + + for (size_t k = 0; k < seq_length; ++k) { + x_grad_mat(k, label_value[k]) -= static_cast(1.); + } + + if (transition_grad) { + T* trans_grad = transition_grad->data(); + for (size_t k = 0; k < tag_num; ++k) { + trans_grad[k] += x_grad_mat(/*from start state*/ 0, k); + trans_grad[tag_num + k] += + x_grad_mat(/*to end state*/ seq_length - 1, k); + } + + auto x_exps_mat = EigenMatrix::From(emission_exps); + + // TODO(caoying): Fix this to avoid using this local variable. 
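 // tmp(k, j) below holds the row-normalized product beta(k, j) *
 // x_exps(k, j); the (**) loop combines it with alpha(k - 1, i) and the
 // exponentiated transition weights to accumulate the transition gradient
 // one time step at a time.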
+ Tensor tmp; + tmp.mutable_data(beta->dims(), ctx.GetPlace()); + auto tmp_mat = EigenMatrix::From(tmp); + auto prob = beta_mat * x_exps_mat; + auto row_sum = prob.sum(Eigen::DSizes(1)) + .reshape(Eigen::DSizes(seq_length, 1)) + .broadcast(Eigen::DSizes(1, tag_num)); + tmp_mat.device(*place) = prob / row_sum; + + for (size_t k = 1; k < seq_length; ++k) { + T sum = 0.; + for (size_t i = 0; i < tag_num; ++i) { + for (size_t j = 0; j < tag_num; ++j) { + sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * // (**) + alpha_mat(k - 1, i) * tmp_mat(k, j); + } + } + sum = 1. / sum; + for (size_t i = 0; i < tag_num; ++i) { + for (size_t j = 0; j < tag_num; ++j) { + trans_grad[(i + state_trans_base_idx) * tag_num + j] += + sum * w_exps[(i + state_trans_base_idx) * tag_num + j] * + alpha_mat(k - 1, i) * tmp_mat(k, j); + } + } + trans_grad[(label_value[k - 1] + state_trans_base_idx) * tag_num + + label_value[k]] -= static_cast(1.); + } + } + }; }; } // namespace operators From b50c33fd002bd19a0eb2db8c0df83c469dd69eda Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Fri, 27 Oct 2017 22:06:36 +0800 Subject: [PATCH 15/35] Use fixed activation in the lstm kernel, since there is some bug in the activation function pointer. It will be fixed later. --- paddle/operators/lstm_op.cc | 14 +++++ .../operators/math/detail/lstm_cpu_kernel.h | 23 ++------ .../operators/math/detail/lstm_gpu_kernel.h | 28 +++------ paddle/operators/math/detail/lstm_kernel.h | 59 ++++++++++++++++--- .../paddle/v2/framework/tests/test_lstm_op.py | 9 +-- 5 files changed, 84 insertions(+), 49 deletions(-) diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 73ab9b18dc..10b60e3de6 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -82,6 +82,13 @@ class LSTMOp : public framework::OperatorWithKernel { ctx->ShareLoD("Input", "Hidden"); ctx->ShareLoD("Input", "Cell"); } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType( + ctx.Input("Input")->type()); + } }; class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { @@ -239,6 +246,13 @@ class LSTMGradOp : public framework::OperatorWithKernel { if (ctx->HasOutput(b_g_name)) ctx->SetOutputDim(b_g_name, ctx->GetInputDim("Bias")); } + + protected: + framework::DataType IndicateDataType( + const framework::ExecutionContext& ctx) const override { + return framework::ToDataType( + ctx.Input("Input")->type()); + } }; } // namespace operators diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/operators/math/detail/lstm_cpu_kernel.h index 74d51d7bc9..d0ed55ea16 100644 --- a/paddle/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/operators/math/detail/lstm_cpu_kernel.h @@ -26,10 +26,7 @@ namespace detail { template void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, - int frameSize, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int frameSize) { T rValueIn; T rValueIg; T rValueFg; @@ -60,10 +57,8 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, rPrevState = value.prevStateValue[i]; } - hppl::cpu::ForwardAct act; op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, act(active_node), act(active_gate), - act(active_state)); + rOut, rCheckI, rCheckF, rCheckO); valueIn[i] = rValueIn; valueIg[i] = rValueIg; @@ -77,10 +72,7 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, 
template void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, - LstmMetaGrad grad, int frameSize, - activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + LstmMetaGrad grad, int frameSize) { T rValueIn; T rValueIg; T rValueFg; @@ -127,11 +119,10 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, rPrevState = value.prevStateValue[i]; } - hppl::cpu::BackwardAct act; op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, - rCheckOGrad, act(active_node), act(active_gate), act(active_state)); + rCheckOGrad); gradIn[i] = rGradIn; gradIg[i] = rGradIg; @@ -283,8 +274,7 @@ void cpu_lstm_forward(Op op, LstmMetaValue value, int frameSize, avx_lstm_forward_one_sequence(op, value, frameSize, active_node, active_gate, active_state); } else { - naive_lstm_forward_one_sequence(op, value, frameSize, active_node, - active_gate, active_state); + naive_lstm_forward_one_sequence(op, value, frameSize); } } @@ -297,8 +287,7 @@ void cpu_lstm_backward(Op op, LstmMetaValue value, LstmMetaGrad grad, avx_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, active_gate, active_state); } else { - naive_lstm_backward_one_sequence(op, value, grad, frameSize, active_node, - active_gate, active_state); + naive_lstm_backward_one_sequence(op, value, grad, frameSize); } } diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/operators/math/detail/lstm_gpu_kernel.h index 9573eaefb6..c06f164f84 100644 --- a/paddle/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/operators/math/detail/lstm_gpu_kernel.h @@ -32,9 +32,7 @@ namespace detail { */ template __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, - int batchSize, activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int batchSize) { const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; if (frameIdx >= frameSize) return; @@ -70,10 +68,8 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, rPrevState = value.prevStateValue[frameIdx]; } - hppl::gpu::ForwardAct act; op(rValueIn, rValueIg, rValueFg, rValueOg, rPrevState, rState, rStateAtv, - rOut, rCheckI, rCheckF, rCheckO, act(active_node), act(active_gate), - act(active_state)); + rOut, rCheckI, rCheckF, rCheckO); value.gateValue[frameIdx] = rValueIn; value.gateValue[frameIdx + frameSize] = rValueIg; @@ -92,9 +88,7 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frameSize, template __global__ void KeLstmBackward(Op op, LstmMetaValue value, LstmMetaGrad grad, int frameSize, - int batchSize, activation_mode_t active_node, - activation_mode_t active_gate, - activation_mode_t active_state) { + int batchSize) { const int frameIdx = blockIdx.x * blockDim.x + threadIdx.x; if (frameIdx >= frameSize) return; @@ -145,11 +139,9 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue value, rPrevState = value.prevStateValue[frameIdx]; } - hppl::gpu::BackwardAct act; op(rValueIn, rValueIg, rValueFg, rValueOg, rGradIn, rGradIg, rGradFg, rGradOg, rPrevState, rPrevStateGrad, rState, rStateGrad, rStateAtv, rOutputGrad, - rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad, - act(active_node), act(active_gate), act(active_state)); + rCheckI, rCheckF, rCheckO, rCheckIGrad, rCheckFGrad, rCheckOGrad); grad.gateGrad[frameIdx] = rGradIn; grad.gateGrad[frameIdx + 
frameSize] = rGradIg; @@ -205,13 +197,11 @@ void gpu_lstm_forward(const platform::DeviceContext& context, Op op, if (batchSize == 1) { KeLstmForward<<>>( - op, value, frameSize, batchSize, active_node, active_gate, - active_state); + op, value, frameSize, batchSize); } else { KeLstmForward<<>>( - op, value, frameSize, batchSize, active_node, active_gate, - active_state); + op, value, frameSize, batchSize); } } @@ -240,13 +230,11 @@ void gpu_lstm_backward(const platform::DeviceContext& context, Op op, if (batchSize == 1) { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize, active_node, active_gate, - active_state); + op, value, grad, frameSize, batchSize); } else { KeLstmBackward<<>>( - op, value, grad, frameSize, batchSize, active_node, active_gate, - active_state); + op, value, grad, frameSize, batchSize); } } diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/operators/math/detail/lstm_kernel.h index 6f3ead2397..461039a4d5 100644 --- a/paddle/operators/math/detail/lstm_kernel.h +++ b/paddle/operators/math/detail/lstm_kernel.h @@ -24,15 +24,29 @@ namespace detail { namespace forward { +template +DEVICE inline T sigmoid(const T a) { + const T min = SIGMOID_THRESHOLD_MIN; + const T max = SIGMOID_THRESHOLD_MAX; + T tmp = (a < min) ? min : ((a > max) ? max : a); + return static_cast(1.0) / (static_cast(1.0) + exp(-tmp)); +} + +template +DEVICE inline T tanh(const T a) { + T tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; +} + template class lstm { public: HOSTDEVICE void operator()(T &valueIn, T &valueIg, T &valueFg, T &valueOg, T &prevState, T &state, T &stateAtv, T &output, - T &checkI, T &checkF, T &checkO, - typename hppl::ForwardActType::type actInput, - typename hppl::ForwardActType::type actGate, - typename hppl::ForwardActType::type actState) { + T &checkI, T &checkF, T &checkO) { +#if 0 + // TODO(qingqing) support to activation speficed by users valueIn = actInput(valueIn); valueIg = actGate(valueIg + prevState * checkI); valueFg = actGate(valueFg + prevState * checkF); @@ -40,6 +54,15 @@ class lstm { valueOg = actGate(valueOg + state * checkO); stateAtv = actState(state); output = valueOg * stateAtv; +#else + valueIn = tanh(valueIn); + valueIg = sigmoid(valueIg + prevState * checkI); + valueFg = sigmoid(valueFg + prevState * checkF); + state = valueIn * valueIg + prevState * valueFg; + valueOg = sigmoid(valueOg + state * checkO); + stateAtv = tanh(state); + output = valueOg * stateAtv; +#endif } #ifndef __NVCC__ #ifndef __AVX__ // If not compiled with AVX instructs. 
disable AVX by default @@ -72,6 +95,16 @@ class lstm { namespace backward { +template +DEVICE inline T sigmoid(const T a, const T b) { + return a * b * (1.0 - b); +} + +template +DEVICE inline T tanh(const T a, const T b) { + return a * (1.0 - b * b); +} + template class lstm { public: @@ -80,10 +113,9 @@ class lstm { T &prevState, T &prevStateGrad, T &state, T &stateGrad, T &stateAtv, T &outputGrad, T &checkI, T &checkF, T &checkO, T &checkIGrad, - T &checkFGrad, T &checkOGrad, - typename hppl::BackwardActType::type actInput, - typename hppl::BackwardActType::type actGate, - typename hppl::BackwardActType::type actState) { + T &checkFGrad, T &checkOGrad) { +#if 0 + // TODO(qingqing) support activations specified by users gradOg = actGate(outputGrad * stateAtv, valueOg); stateGrad += actState(outputGrad * valueOg, stateAtv) + gradOg * checkO; gradIn = actInput(stateGrad * valueIg, valueIn); @@ -93,6 +125,17 @@ class lstm { checkIGrad = gradIg * prevState; checkFGrad = gradFg * prevState; checkOGrad = gradOg * state; +#else + gradOg = sigmoid(outputGrad * stateAtv, valueOg); + stateGrad += tanh(outputGrad * valueOg, stateAtv) + gradOg * checkO; + gradIn = tanh(stateGrad * valueIg, valueIn); + gradIg = sigmoid(stateGrad * valueIn, valueIg); + gradFg = sigmoid(stateGrad * prevState, valueFg); + prevStateGrad = gradIg * checkI + gradFg * checkF + stateGrad * valueFg; + checkIGrad = gradIg * prevState; + checkFGrad = gradFg * prevState; + checkOGrad = gradOg * state; +#endif } #ifndef __NVCC__ #ifndef __AVX__ // If not compiled with AVX instructions, disable AVX by default diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py index 7f428cd617..f308ba82fa 100644 --- a/python/paddle/v2/framework/tests/test_lstm_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_op.py @@ -110,7 +110,7 @@ def lstm( class TestLstmOp(OpTest): def set_argument(self): - self.lod = [[0, 2, 6]] + self.lod = [[0, 2, 5, 7]] self.D = 16 self.act_gate = 'sigmoid' @@ -164,12 +164,13 @@ class TestLstmOp(OpTest): # TODO(qingqing) remove following two lines after the check_grad is refined. self.outputs['BatchGate'] = None self.outputs['BatchCellPreAct'] = None - self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden']) + self.check_grad( + ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=0.02) class TestLstmOpHasNoInitial(TestLstmOp): def set_argument(self): - self.lod = [[0, 2, 6]] + self.lod = [[0, 2, 5, 7]] self.D = 16 self.act_gate = 'sigmoid' @@ -182,7 +183,7 @@ class TestLstmOpHasNoInitial(TestLstmOp): class TestLstmOpRerverse(TestLstmOp): def set_argument(self): - self.lod = [[0, 2, 6]] + self.lod = [[0, 2, 5, 7]] self.D = 16 self.act_gate = 'sigmoid' From 86fd6b63737cda4cb90b1bbbbc863edbcd849b8e Mon Sep 17 00:00:00 2001 From: caoying03 Date: Sun, 29 Oct 2017 23:33:06 +0800 Subject: [PATCH 16/35] add gpu kernel by copying inputs/outputs between cpu and gpu.
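This patch keeps a single CPU implementation of the linear chain CRF and makes the GPU kernel work by staging data through host memory: copy the inputs device-to-host, run the CPU code, copy the results host-to-device. The sketch below illustrates only that pattern; Buffer, copy_to_host, and copy_to_device are hypothetical stand-ins (not Paddle APIs), and a real kernel would use cudaMemcpy or the framework's CopyFrom instead.

#include <algorithm>
#include <vector>

// Hypothetical device abstraction used only for this sketch.
struct Buffer {
  std::vector<float> host;  // host mirror of the data
  bool on_gpu = false;      // where the authoritative copy lives
};

// Stand-ins for D2H/H2D transfers (assumed, not real APIs).
std::vector<float> copy_to_host(const Buffer& b) { return b.host; }
void copy_to_device(Buffer* b, const std::vector<float>& v) {
  b->host = v;
  b->on_gpu = true;
}

// The single CPU implementation of the operator.
void compute_cpu(const std::vector<float>& in, std::vector<float>* out) {
  out->assign(in.begin(), in.end());
  for (float& v : *out) v = std::max(v, 0.0f);  // placeholder computation
}

// "GPU kernel": stage the data through the CPU implementation.
void compute(const Buffer& in, Buffer* out) {
  if (in.on_gpu) {
    std::vector<float> h_out;
    compute_cpu(copy_to_host(in), &h_out);  // D2H, then reuse the CPU path
    copy_to_device(out, h_out);             // H2D
  } else {
    compute_cpu(in.host, &out->host);
  }
}

The cost is an extra round trip per call, which is why the diffs below leave TODOs about profiling the whole training process before committing to a native GPU kernel.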
--- paddle/framework/operator.cc | 20 +- paddle/framework/tensor_impl.h | 7 +- paddle/operators/linear_chain_crf_op.cc | 6 +- paddle/operators/linear_chain_crf_op.cu | 26 ++ paddle/operators/linear_chain_crf_op.h | 304 ++++++++++++++++++++---- 5 files changed, 295 insertions(+), 68 deletions(-) create mode 100644 paddle/operators/linear_chain_crf_op.cu diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index a67625fa88..3a6d1b6a29 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -38,7 +38,7 @@ const Tensor* GetTensorFromVar(const Variable* var) { return &var->Get(); } PADDLE_ENFORCE(var->IsType(), - "The Input must be LoDTensor or Tensor."); + "The Input must be a LoDTensor or a Tensor."); return &var->Get(); } @@ -47,39 +47,39 @@ Tensor* GetTensorFromVar(Variable* var) { return var->GetMutable(); } PADDLE_ENFORCE(var->IsType(), - "The Input must be LoDTensor or Tensor."); + "The Input must be a LoDTensor or a Tensor."); return var->GetMutable(); } std::string OperatorBase::Input(const std::string& name) const { auto& ins = Inputs(name); PADDLE_ENFORCE_LE(ins.size(), 1UL, - "Op %s input %s should contain only one variable", type_, - name); + "Operator %s's input %s should contain only one variable.", + type_, name); return ins.empty() ? kEmptyVarName : ins[0]; } const std::vector& OperatorBase::Inputs( const std::string& name) const { auto it = inputs_.find(name); - PADDLE_ENFORCE(it != inputs_.end(), "Op %s do not have input %s", type_, - name); + PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.", + type_, name); return it->second; } std::string OperatorBase::Output(const std::string& name) const { auto& outs = Outputs(name); PADDLE_ENFORCE_LE(outs.size(), 1UL, - "Op %s output %s should contain only one variable", type_, - name); + "Operator %s's output %s should contain only one variable.", + type_, name); return outs.empty() ? kEmptyVarName : outs[0]; } const std::vector& OperatorBase::Outputs( const std::string& name) const { auto it = outputs_.find(name); - PADDLE_ENFORCE(it != outputs_.end(), "Op %s does not have output called %s", - type_, name); + PADDLE_ENFORCE(it != outputs_.end(), + "Operator %s does not have an output called %s.", type_, name); return it->second; } diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 4097f92e02..d6ef0a80de 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -108,9 +108,10 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { if (holder_ != nullptr) { holder_->set_type(type); } - PADDLE_ENFORCE_GT(numel(), 0, - "Tensor's numel must be larger than zero to call " - "Tensor::mutable_data. Call Tensor::set_dim first."); + PADDLE_ENFORCE_GT( + numel(), 0, + "When calling this method, the Tensor's numel must be larger than zero. 
" + "Please check Tensor::Resize has been called first."); int64_t size = numel() * SizeOfType(type); /* some versions of boost::variant don't have operator!= */ if (holder_ == nullptr || !(holder_->place() == place) || diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 65bbfff0f8..06d71d26be 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -204,8 +204,7 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(emission_exps_dims[0], "An empty mini-batch is not allowed."); - auto transition_exps_dims = - ctx->GetInputDim(framework::GradVarName("TransitionExps")); + auto transition_exps_dims = ctx->GetInputDim("TransitionExps"); PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2UL, "The Input(TransitionExps) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( @@ -240,7 +239,8 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { // operator is determined by its input: graidents of LogLikelihood. framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("LogLikelihood")->type()); + return framework::ToDataType( + ctx.Input(framework::GradVarName("LogLikelihood"))->type()); } }; diff --git a/paddle/operators/linear_chain_crf_op.cu b/paddle/operators/linear_chain_crf_op.cu new file mode 100644 index 0000000000..6fc8995f4c --- /dev/null +++ b/paddle/operators/linear_chain_crf_op.cu @@ -0,0 +1,26 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/linear_chain_crf_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL( + linear_chain_crf, + ops::LinearChainCRFOpKernel, + ops::LinearChainCRFOpKernel); +REGISTER_OP_GPU_KERNEL( + linear_chain_crf_grad, + ops::LinearChainCRFGradOpKernel, + ops::LinearChainCRFGradOpKernel); diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index f028b6554e..81b36dd95d 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -15,6 +15,7 @@ limitations under the License. 
*/ #pragma once #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { @@ -47,36 +48,90 @@ template class LinearChainCRFOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* emission_weights = ctx.Input("Emission"); - auto* transition_weights = ctx.Input("Transition"); - auto* emission_exps = ctx.Output("EmissionExps"); - emission_exps->mutable_data(ctx.GetPlace()); - auto* transition_exps = ctx.Output("TransitionExps"); - transition_exps->mutable_data(ctx.GetPlace()); - auto* label = ctx.Input("Label"); - - auto in_lod = emission_weights->lod(); - PADDLE_ENFORCE(in_lod.size(), "Input(Emission) is not a sequence."); - // TODO(caoying) The checks related to LoD information should be // moved into InferShape once after the InferShape is refactored. - PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL, + PADDLE_ENFORCE_EQ(ctx.Input("Emission")->NumLevels(), 1UL, "The Input(Emission) should be a sequence."); - PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL, + PADDLE_ENFORCE_EQ(ctx.Input("Label")->NumLevels(), 1UL, "The Input(Label) should be a sequence."); + auto in_lod = ctx.Input("Label")->lod(); + PADDLE_ENFORCE(in_lod.size(), "Input(Label) must be a sequence."); const size_t level = 0; + const size_t seq_num = in_lod[level].size() - 1; + + // These local variables hold the inputs and outputs, guaranteeing them on + // CPU memory, to provide a consistent reference. + // TODO(caoying) Fix this by moving all these local variables into the + // class's data members once we can profile the whole training process. + LoDTensor* emission_weights = nullptr; + LoDTensor emission_weight_tensor; + Tensor* transition_weights = nullptr; + Tensor transition_weight_tensor; + LoDTensor* label = nullptr; + LoDTensor label_tensor; + + Tensor* emission_exps = nullptr; + Tensor emission_exps_tensor; + Tensor* transition_exps = nullptr; + Tensor transition_exps_tensor; + Tensor* alpha = nullptr; + Tensor alpha_tensor; + Tensor* ll = nullptr; + Tensor ll_tensor; + + if (platform::is_gpu_place(ctx.GetPlace())) { + emission_weights = &emission_weight_tensor; + transition_weights = &transition_weight_tensor; + label = &label_tensor; + + CopyInputsToCpuMemory( + ctx.device_context(), *ctx.Input("Emission"), + *ctx.Input("Transition"), *ctx.Input("Label"), + emission_weights, transition_weights, label); + + emission_exps = &emission_exps_tensor; + emission_exps->Resize(emission_weights->dims()); + + transition_exps = &transition_exps_tensor; + transition_exps->Resize(transition_weights->dims()); + + alpha = &alpha_tensor; + alpha->Resize(ctx.Output("Alpha")->dims()); + + ll = &ll_tensor; + } else { + emission_weights = + const_cast(ctx.Input("Emission")); + transition_weights = const_cast(ctx.Input("Transition")); + label = const_cast(ctx.Input("Label")); + + emission_exps = ctx.Output("EmissionExps"); + transition_exps = ctx.Output("TransitionExps"); + alpha = ctx.Output("Alpha"); + ll = ctx.Output("LogLikelihood"); + } + // Because the computation code only runs on the CPU, here the memory for + // all the outputs is FIXED to be allocated on the CPU memory. + emission_exps->mutable_data(platform::CPUPlace()); + transition_exps->mutable_data(platform::CPUPlace()); + alpha->mutable_data(platform::CPUPlace()); + + // Resize the output tensor to its correct dimension.
+ ll->Resize({static_cast(seq_num), 1}); + ll->mutable_data(platform::CPUPlace()); + + // Now, all the inputs and outputs should be on the CPU memory. auto emission_dims = emission_weights->dims(); const size_t batch_size = emission_dims[0]; const size_t tag_num = emission_dims[1]; - const size_t seq_num = in_lod[level].size() - 1; Tensor emission_row_max; emission_row_max.mutable_data( framework::make_ddim({static_cast(batch_size), 1}), - ctx.GetPlace()); + platform::CPUPlace()); - auto place = ctx.GetEigenDevice(); + auto place = ctx.GetEigenDevice(); auto x = EigenMatrix::From(*emission_weights); auto x_row_max = EigenMatrix::From(emission_row_max); x_row_max.device(place) = @@ -91,12 +146,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { auto w_exps = EigenMatrix::From(*transition_exps); w_exps.device(place) = w.exp(); - auto* alpha = ctx.Output("Alpha"); - alpha->mutable_data(ctx.GetPlace()); - auto* ll = ctx.Output("LogLikelihood"); - // resize the output tensor to the correct dimension. - ll->Resize({static_cast(seq_num), 1}); - T* log_likelihood = ll->mutable_data(ctx.GetPlace()); + T* log_likelihood = ll->data(); for (size_t i = 0; i < seq_num; ++i) { int start_pos = static_cast(in_lod[level][i]); int end_pos = static_cast(in_lod[level][i + 1]); @@ -116,9 +166,61 @@ class LinearChainCRFOpKernel : public framework::OpKernel { one_seq, one_seq_row_max, one_seq_exps, *transition_weights, *transition_exps, one_seq_label, &one_seq_alpha); } + + if (platform::is_gpu_place(ctx.GetPlace())) { + CopyOutputsToGpuMemory( + ctx.device_context(), *emission_exps, *transition_exps, *alpha, *ll, + ctx.Output("EmissionExps"), + ctx.Output("TransitionExps"), ctx.Output("Alpha"), + ctx.Output("LogLikelihood")); + } + }; + + private: + void CopyInputsToCpuMemory(const platform::DeviceContext& ctx, + const LoDTensor& emission_weights_src, + const Tensor& transition_weights_src, + const LoDTensor& label_src, + LoDTensor* emission_weights_dst, + Tensor* transition_weights_dst, + LoDTensor* label_dst) const { + // Copy the inputs from GPU memory to CPU memory if this operator runs on + // a GPU device. + auto copyLoDTensor = [](const platform::DeviceContext& ctx, + const LoDTensor& src, LoDTensor* dst) { + dst->mutable_data(src.dims(), platform::CPUPlace()); + dst->CopyFrom(src, platform::CPUPlace(), ctx); + + }; + copyLoDTensor(ctx, emission_weights_src, emission_weights_dst); + copyLoDTensor(ctx, label_src, label_dst); + + transition_weights_dst->mutable_data(transition_weights_src.dims(), + platform::CPUPlace()); + transition_weights_dst->CopyFrom(transition_weights_src, + platform::CPUPlace(), ctx); + } + + void CopyOutputsToGpuMemory(const platform::DeviceContext& ctx, + const Tensor& emission_exps_src, + const Tensor& transition_exps_src, + const Tensor& alpha_src, const Tensor& ll_src, + Tensor* emission_exps_dst, + Tensor* transition_exps_dst, Tensor* alpha_dst, + Tensor* ll_dst) const { + // Copy the forward results from CPU memory to GPU memory if this + // operator runs on a GPU device.
+ auto copyTensor = [](const platform::DeviceContext& ctx, const Tensor& src, + Tensor* dst) { + dst->mutable_data(platform::GPUPlace()); + dst->CopyFrom(src, platform::GPUPlace(), ctx); + }; + copyTensor(ctx, emission_exps_src, emission_exps_dst); + copyTensor(ctx, transition_exps_src, transition_exps_dst); + copyTensor(ctx, alpha_src, alpha_dst); + copyTensor(ctx, ll_src, ll_dst); }; - protected: T ForwardOneSequence(const Tensor& emission, const Tensor& emission_row_max, const Tensor& emission_exps, const Tensor& trans_weights, const Tensor& trans_weight_exps, const Tensor& label, @@ -183,35 +285,84 @@ template class LinearChainCRFGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* label = ctx.Input("Label"); - auto* emission_exps = ctx.Input("EmissionExps"); - auto* transition_exps = ctx.Input("TransitionExps"); - auto* alpha = ctx.Input("Alpha"); - const T* ll_grad = - ctx.Input(framework::GradVarName("LogLikelihood"))->data(); - - auto place = ctx.GetPlace(); - auto* emission_grad = - ctx.Output(framework::GradVarName("Emission")); - emission_grad->mutable_data(place); - - auto* trans_grad = ctx.Output(framework::GradVarName("Transition")); - if (trans_grad) { - trans_grad->mutable_data(place); + const size_t level = 0; // currently, only support sequence. + auto lod = ctx.Input("Label")->lod(); + PADDLE_ENFORCE(lod.size(), "Input(Label) must be a sequence."); + + // These local variables hold the inputs and outputs, guaranteeing them on + // CPU memory, to provide a consistent reference. + // TODO(caoying) Fix this by moving all these local variables into the + // class's data members once we can profile the training process. + Tensor* label = nullptr; + Tensor label_tensor; + Tensor* emission_exps = nullptr; + Tensor emission_exps_tensor; + Tensor* transition_exps = nullptr; + Tensor transition_exps_tensor; + Tensor* alpha = nullptr; + Tensor alpha_tensor; + Tensor ll_grad_tensor; + T* ll_grad = nullptr; + + Tensor* emission_grad = nullptr; + Tensor emission_grad_tensor; + Tensor* transition_grad = nullptr; + Tensor transition_grad_tensor; + + if (platform::is_gpu_place(ctx.GetPlace())) { + label = &label_tensor; + emission_exps = &emission_exps_tensor; + transition_exps = &transition_exps_tensor; + alpha = &alpha_tensor; + CopyInputsToCpuMemory( + ctx.device_context(), *ctx.Input("Label"), + *ctx.Input("EmissionExps"), + *ctx.Input("TransitionExps"), *ctx.Input("Alpha"), + *ctx.Input(framework::GradVarName("LogLikelihood")), label, + emission_exps, transition_exps, alpha, &ll_grad_tensor); + ll_grad = ll_grad_tensor.data(); + + if (ctx.Output(framework::GradVarName("Emission"))) { + emission_grad = &emission_grad_tensor; + emission_grad->Resize(emission_exps->dims()); + } + + if (ctx.Output(framework::GradVarName("Transition"))) { + transition_grad = &transition_grad_tensor; + transition_grad->Resize(transition_exps->dims()); + } + } else { + label = const_cast(ctx.Input("Label")); + emission_exps = const_cast(ctx.Input("EmissionExps")); + transition_exps = + const_cast(ctx.Input("TransitionExps")); + alpha = const_cast(ctx.Input("Alpha")); + ll_grad = const_cast( + ctx.Input(framework::GradVarName("LogLikelihood"))) + ->data(); + + emission_grad = ctx.Output(framework::GradVarName("Emission")); + transition_grad = + ctx.Output(framework::GradVarName("Transition")); + } + PADDLE_ENFORCE(emission_grad, "Output(Emission@Grad) should not be null."); + emission_grad->mutable_data(platform::CPUPlace()); +
math::SetConstant()(ctx.device_context(), + emission_grad, 0.); + if (transition_grad) { + transition_grad->mutable_data(platform::CPUPlace()); + math::SetConstant()(ctx.device_context(), + transition_grad, 0.); } + // Now, all the inputs and outputs should be on the CPU memory. auto emission_dims = emission_exps->dims(); - // Beta is the memo table used in dynamic programming to calculate the // backward vectors. For a backward vector i (the i-th row of beta), it - // captures the unnormalized probabilities of partial sequences starting at - // position i. + // captures the unnormalized probabilities of partial sequences starting + // at position i. Tensor beta; - beta.mutable_data(emission_dims, place); - - const size_t level = 0; // currently, only support sequence. - auto lod = label->lod(); - PADDLE_ENFORCE(lod.size(), "Input(Label) is not a sequence."); + beta.mutable_data(emission_dims, platform::CPUPlace()); for (size_t i = 0; i < lod[level].size() - 1; ++i) { int start_pos = static_cast(lod[level][i]); @@ -228,11 +379,60 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { BackwardOneSequence(ctx.device_context(), ll_grad[i], one_seq_emission_exps, *transition_exps, one_seq_alpha, one_seq_label, &one_seq_beta, - trans_grad, &one_seq_emission_grad); + transition_grad, &one_seq_emission_grad); + } + + if (platform::is_gpu_place(ctx.GetPlace())) { + CopyOutputsToGpuMemory( + ctx.device_context(), emission_grad, transition_grad, + ctx.Output(framework::GradVarName("Emission")), + ctx.Output(framework::GradVarName("Transition"))); } }; - protected: + private: + void CopyInputsToCpuMemory(const platform::DeviceContext& ctx, + const LoDTensor& label_src, + const Tensor& emission_exps_src, + const Tensor& transition_exps_src, + const Tensor& alpha_src, const Tensor& ll_grad_src, + Tensor* label_dst, Tensor* emission_exps_dst, + Tensor* transition_exps_dst, Tensor* alpha_dst, + Tensor* ll_grad_dst) const { + // Copy the inputs from GPU memory to CPU memory when this operator runs + // on a GPU device. + label_dst->mutable_data(label_src.dims(), platform::CPUPlace()); + label_dst->CopyFrom(label_src, platform::CPUPlace(), ctx); + + auto copyTensor = [](const platform::DeviceContext& ctx, const Tensor& src, + Tensor* dst) { + dst->mutable_data(src.dims(), platform::CPUPlace()); + dst->CopyFrom(src, platform::CPUPlace(), ctx); + }; + copyTensor(ctx, emission_exps_src, emission_exps_dst); + copyTensor(ctx, transition_exps_src, transition_exps_dst); + copyTensor(ctx, alpha_src, alpha_dst); + copyTensor(ctx, ll_grad_src, ll_grad_dst); + }; + + void CopyOutputsToGpuMemory(const platform::DeviceContext& ctx, + const Tensor* emission_grad_src, + const Tensor* transition_grad_src, + Tensor* emission_grad_dst, + Tensor* transition_grad_dst) const { + // Copy the backward results from CPU memory to GPU + // memory if this operator runs on a GPU device.
+ auto copyTensor = [](const platform::DeviceContext& ctx, const Tensor* src, + Tensor* dst) { + if (src && dst) { + dst->mutable_data(platform::GPUPlace()); + dst->CopyFrom(*src, platform::GPUPlace(), ctx); + } + }; + copyTensor(ctx, emission_grad_src, emission_grad_dst); + copyTensor(ctx, transition_grad_src, transition_grad_dst); + }; + void BackwardOneSequence(const platform::DeviceContext& ctx, const T ll_grad, const Tensor& emission_exps, const Tensor& transition_exps, const Tensor& alpha, @@ -255,7 +455,6 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { beta_value[(seq_length - 1) * tag_num + i] = w_exps[tag_num + i]; } NormalizeL1(beta_value + (seq_length - 1) * tag_num, tag_num); - for (int k = static_cast(seq_length) - 2; k >= 0; --k) { for (size_t i = 0; i < tag_num; ++i) { T sum = 0.; @@ -270,10 +469,11 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { NormalizeL1(beta_value + k * tag_num, tag_num); } + auto x_grad_mat = EigenMatrix::From(*emission_grad); auto alpha_mat = EigenMatrix::From(alpha); auto beta_mat = EigenMatrix::From(*beta); - auto x_grad_mat = EigenMatrix::From(*emission_grad); - auto* place = ctx.GetEigenDevice(); + + auto* place = ctx.GetEigenDevice(); auto prob = alpha_mat * beta_mat; auto row_sum = prob.sum(Eigen::DSizes(1)) .reshape(Eigen::DSizes(seq_length, 1)) @@ -296,7 +496,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { // TODO(caoying): Fix this to avoid using this local variable. Tensor tmp; - tmp.mutable_data(beta->dims(), ctx.GetPlace()); + tmp.mutable_data(beta->dims(), platform::CPUPlace()); auto tmp_mat = EigenMatrix::From(tmp); auto prob = beta_mat * x_exps_mat; auto row_sum = prob.sum(Eigen::DSizes(1)) From a328ae3b9ba2b4089e491253e985874f2c1cf147 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 31 Oct 2017 17:47:25 +0800 Subject: [PATCH 17/35] Use posix_memalign to allocate aligned memory, since some SIMD instructions require the alignment of memory accesses. 
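As context for the diff below: posix_memalign(&p, alignment, size) returns zero on success and sets p to an address that is a multiple of alignment, which must be a power of two and a multiple of sizeof(void*); the memory is later released with free(). The 4096-byte alignment for the MKL-DNN build and 32 bytes otherwise come straight from the patch. A minimal standalone check of the guarantee:

#include <cstdint>
#include <cstdio>
#include <stdlib.h>

int main() {
  void* p = nullptr;
  // 32-byte alignment matches 256-bit (32-byte) AVX loads and stores.
  const size_t alignment = 32;
  if (posix_memalign(&p, alignment, 1024) != 0) return 1;  // 0 means success
  // The returned address is a multiple of the requested alignment.
  std::printf("aligned: %d\n",
              static_cast<int>(
                  reinterpret_cast<std::uintptr_t>(p) % alignment == 0));
  free(p);
  return 0;
}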
--- paddle/memory/detail/system_allocator.cc | 11 ++++++++++- paddle/operators/reshape_op.cc | 2 +- paddle/operators/save_load_op_test.cc | 6 +++--- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index 33166d9ce2..6b4e46f56a 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -41,7 +41,16 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) { index = 0; // unlock memory - void* p = malloc(size); + void* p; + +#ifdef PADDLE_USE_MKLDNN + // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp + // memory alignment + PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0); +#else + PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0); +#endif + PADDLE_ENFORCE(p, "Fail to allocate CPU memory: size = %d .", size); if (p != nullptr) { if (FLAGS_use_pinned_memory) { diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index eda8226480..9213cc7a85 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -36,7 +36,7 @@ class ReshapeOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); auto x_dims = ctx->GetInputDim("X"); // TODO(qiao) change batch_size - for (int i = 1; i < shape.size(); ++i) { + for (size_t i = 1; i < shape.size(); ++i) { PADDLE_ENFORCE(shape[i] > 0, "Each dimension of shape " "must be positiv except the first."); diff --git a/paddle/operators/save_load_op_test.cc b/paddle/operators/save_load_op_test.cc index fe2b15ec09..a57466a48d 100644 --- a/paddle/operators/save_load_op_test.cc +++ b/paddle/operators/save_load_op_test.cc @@ -34,7 +34,7 @@ TEST(SaveLoadOp, CPU) { tensor->set_lod(expect_lod); int* expect = tensor->mutable_data(place); - for (size_t i = 0; i < paddle::framework::product(tensor->dims()); ++i) { + for (int64_t i = 0; i < tensor->numel(); ++i) { expect[i] = static_cast(i); } paddle::framework::AttributeMap attrs; @@ -50,7 +50,7 @@ TEST(SaveLoadOp, CPU) { "load", {}, {{"Out", {"out_var"}}}, attrs); load_op->Run(scope, ctx); int* actual = target->data(); - for (size_t i = 0; i < paddle::framework::product(tensor->dims()); ++i) { + for (int64_t i = 0; i < tensor->numel(); ++i) { EXPECT_EQ(expect[i], actual[i]); } auto& actual_lod = target->lod(); @@ -60,4 +60,4 @@ TEST(SaveLoadOp, CPU) { EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); } } -} \ No newline at end of file +} From e88e1964eb79a2ea14d093ce888c702eab6a85ab Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 31 Oct 2017 18:10:21 +0800 Subject: [PATCH 18/35] Fix compiling warning. 
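The hunks below silence -Wsign-compare warnings by giving loop indices the same int64_t type that f::product (and Tensor::numel) return, instead of comparing a signed result against a size_t or int counter. A self-contained reproduction of that warning class and the fix, with product() standing in for the framework function:

#include <cstdint>
#include <vector>

int64_t product(const std::vector<int64_t>& dims) {
  int64_t p = 1;
  for (int64_t d : dims) p *= d;
  return p;  // signed result, mirroring f::product in the tests
}

int main() {
  std::vector<int64_t> dims{3, 4, 5};
  // for (size_t j = 0; j < product(dims); ++j) {}  // warns under -Wsign-compare:
  //                                                // signed/unsigned comparison
  for (int64_t j = 0; j < product(dims); ++j) {
    // loop body elided; the index type now matches product()'s return type
  }
  return 0;
}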
--- paddle/operators/nccl_op_test.cu | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/nccl_op_test.cu b/paddle/operators/nccl_op_test.cu index 80c50a28a9..e5927d56ae 100644 --- a/paddle/operators/nccl_op_test.cu +++ b/paddle/operators/nccl_op_test.cu @@ -185,7 +185,7 @@ TEST_F(NCCLTester, ncclAllReduceOp) { recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[i])->stream()); - for (size_t j = 0; j < f::product(kDims); ++j) { + for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], result, 1e-5); } } @@ -234,7 +234,7 @@ TEST_F(NCCLTester, ncclReduceOp) { recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[kRoot])->stream()); - for (int j = 0; j < f::product(kDims); ++j) { + for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], result, 1e-5); } } @@ -282,7 +282,7 @@ TEST_F(NCCLTester, ncclBcastOp) { recv_tensor.numel() * sizeof(float), static_cast(dev_ctxs[idx])->stream()); - for (size_t j = 0; j < f::product(kDims); ++j) { + for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], result, 1e-5); } } From 6f658bb7d9a0f8586812799375d9cf113e51f75e Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 30 Oct 2017 16:19:58 +0800 Subject: [PATCH 19/35] Clean code and update doc. --- paddle/operators/lstm_op.cc | 10 +++++----- paddle/operators/lstm_op.h | 14 +------------- python/paddle/v2/framework/tests/test_lstm_op.py | 12 +++++++----- 3 files changed, 13 insertions(+), 23 deletions(-) diff --git a/paddle/operators/lstm_op.cc b/paddle/operators/lstm_op.cc index 10b60e3de6..94342d9407 100644 --- a/paddle/operators/lstm_op.cc +++ b/paddle/operators/lstm_op.cc @@ -126,11 +126,11 @@ class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { " - Bias = {b_c, b_i, b_f, b_o, W_ic, W_fc, W_oc}.") .AsDispensable(); AddOutput("Hidden", - "(LoDTensor) the hidden state lod tensor of LSTM operator. " - "The shape and lod is the same with the `Input`."); + "(LoDTensor) the hidden state of LSTM operator. " + "The shape is (T x D), and lod is the same with the `Input`."); AddOutput("Cell", - "(LoDTensor) the cell state lod tensor of LSTM operator. " - "The shape and lod is the same with the `Input`."); + "(LoDTensor) the cell state of LSTM operator. " + "The shape is (T x D), and lod is the same with the `Input`."); AddOutput("BatchGate", "(LoDTensor) This LoDTensor contains input gate, forget gate " "and output gate after the nonlinear computation. 
This " "in the raw input.") .AsIntermediate(); AddOutput("BatchCellPreAct", - "(LoDTensor) This LoDTensor is get in the forward and used " + "(LoDTensor) This LoDTensor is computed in the forward and used " "in the backward.") .AsIntermediate(); AddAttr("usePeepholes", diff --git a/paddle/operators/lstm_op.h b/paddle/operators/lstm_op.h index d147b84aef..af088b80b4 100644 --- a/paddle/operators/lstm_op.h +++ b/paddle/operators/lstm_op.h @@ -155,7 +155,6 @@ class LSTMGradKernel : public framework::OpKernel { auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); auto* hidden_g = ctx.Input(framework::GradVarName("Hidden")); - // auto* cell_g = ctx.Input(framework::GradVarName("Cell")); auto* in_g = ctx.Output(framework::GradVarName("Input")); auto* weight_g = ctx.Output(framework::GradVarName("Weight")); @@ -251,7 +250,7 @@ class LSTMGradKernel : public framework::OpKernel { lstm_grad.gateGrad = gate_g.data(); lstm_grad.outputGrad = out_g.data(); - if (n != 0) { + if (n) { int bstart_pre = static_cast(batch_starts[n - 1]); Tensor cell_pre = batch_cell.Slice(bstart_pre, bstart); Tensor cell_pre_g = batch_cell_g.Slice(bstart_pre, bstart); @@ -292,17 +291,6 @@ class LSTMGradKernel : public framework::OpKernel { } if (bias && bias_g) { /* backward bias */ - // Following Eigen computation failed for double type on GPU device. - // bias_g->mutable_data(ctx.GetPlace()); - // Tensor bias_mat; - // bias_mat.ShareDataWith(*bias_g); - // bias_mat.Resize({1, 4 * frame_size}); - - // auto bias_g_e = EigenVector::Flatten(bias_mat); - // auto gate_g_e = EigenMatrix::From(batch_gate_g); - // Eigen::array dims{{0}}; - // bias_g_e.device(ctx.GetEigenDevice()) = gate_g_e.sum(dims); - int m = static_cast(batch_gate_g.dims()[0]); int n = static_cast(batch_gate_g.dims()[1]); diff --git a/python/paddle/v2/framework/tests/test_lstm_op.py b/python/paddle/v2/framework/tests/test_lstm_op.py index f308ba82fa..ff75160083 100644 --- a/python/paddle/v2/framework/tests/test_lstm_op.py +++ b/python/paddle/v2/framework/tests/test_lstm_op.py @@ -157,15 +157,17 @@ class TestLstmOp(OpTest): } def test_check_output(self): - self.check_output() + self.check_output(atol=1e-8) #TODO(qingqing) add more unit testing case def test_check_grad(self): - # TODO(qingqing) remove following two lines after the check_grad is refined. - self.outputs['BatchGate'] = None - self.outputs['BatchCellPreAct'] = None + # TODO(qingqing) remove following lines after the check_grad is refined. + N = len(self.lod[0]) - 1 + self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') + self.outputs['BatchCellPreAct'] = np.zeros( + (N, self.D)).astype('float64') self.check_grad( - ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=0.02) + ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4) class TestLstmOpHasNoInitial(TestLstmOp): From 2ac9a3d8dcc64ed06c09c42bf55e5be15b7ca329 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 31 Oct 2017 18:38:23 +0800 Subject: [PATCH 20/35] follow comments.
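(A note on the check_grad changes in the LSTM tests above, which set atol and max_relative_error: gradient checking compares the operator's analytic gradient against a numeric one and fails when the relative error exceeds the bound. The following is a generic sketch of that idea, not Paddle's check_grad implementation.)

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>

// Central-difference approximation of df/dx at x.
double numeric_grad(const std::function<double(double)>& f, double x,
                    double eps = 1e-5) {
  return (f(x + eps) - f(x - eps)) / (2.0 * eps);
}

// Relative-error test in the spirit of max_relative_error.
bool close_enough(double analytic, double numeric, double max_rel_err) {
  double scale = std::max(std::fabs(analytic), std::fabs(numeric));
  if (scale < 1e-12) return true;  // both gradients are effectively zero
  return std::fabs(analytic - numeric) / scale <= max_rel_err;
}

int main() {
  auto f = [](double x) { return x * x * x; };  // f(x) = x^3
  const double x = 2.0;
  const double analytic = 3.0 * x * x;          // f'(x) = 3x^2 = 12
  std::printf("ok: %d\n", close_enough(analytic, numeric_grad(f, x), 0.02));
  return 0;
}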
--- paddle/framework/tensor_impl.h | 2 +- paddle/operators/linear_chain_crf_op.cc | 25 ++++++++++--------- paddle/operators/linear_chain_crf_op.h | 14 +++++++---- .../tests/test_linear_chain_crf_op.py | 3 +++ 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 46dc6fbdff..bcccdd5881 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -235,7 +235,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound."); PADDLE_ENFORCE_LT( begin_idx, end_idx, - "The start row index must be smaller than the end row index."); + "The start row index must be less than the end row index."); if (dims_[0] == 1) { return *this; diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/operators/linear_chain_crf_op.cc index 06d71d26be..605dbba5af 100644 --- a/paddle/operators/linear_chain_crf_op.cc +++ b/paddle/operators/linear_chain_crf_op.cc @@ -26,9 +26,8 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { "Emission", "(LoDTensor, default: LoDTensor). " "The unscaled emission weight matrix for the linear chain CRF. " - "This input is a LoDTensor with shape [N x D] where N is the total " - "element number of all input squences in a mini-batch, " - "and D is the total tag number."); + "This input is a LoDTensor with shape [N x D] where N is the size of " + "the mini-batch and D is the total tag number."); AddInput( "Transition", "(Tensor, default: Tensor). A Tensor with shape [(D + 2) x D]. " "The learnable parameter for the linear_chain_crf operator. " "See more details in the operator's comments."); AddInput( "Label", - "(LoDTensor, default: LoDTensor). The groundtruth which is a 2-D " + "(LoDTensor, default: LoDTensor). The ground truth which is a 2-D " "LoDTensor with shape [N x 1], where N is the total element number in " "a mini-batch."); AddOutput( @@ -77,12 +76,13 @@ variables. CRF learns the conditional probability \f$P(Y|X)\f$, where Linear chain CRF is a special case of CRF that is useful for sequence labeling task. Sequence labeling tasks do not assume a lot of conditional -independences among inputs. They only concern about the input and the output -being linear sequences. Thus, the graph model of such a CRF is a simple chain -or a line, which results in the linear chain CRF. +independences among inputs. The only constraint they impose is that the input +and output must be linear sequences. Thus, the graph of such a CRF is a simple +chain or a line, which results in the linear chain CRF. This operator implements the Forward-Backward algorithm for the linear chain -CRF. Please see http://www.cs.columbia.edu/~mcollins/fb.pdf for reference. +CRF. Please see http://www.cs.columbia.edu/~mcollins/fb.pdf and +http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf for reference. Equation: @@ -111,7 +111,7 @@ NOTE: transition features. The emission feature weights are NOT computed in this operator. They MUST be computed first before this operator is called. -2. Because this operator performs globally normaliztion over all possible +2. Because this operator performs global normalization over all possible sequences internally, it expects UNSCALED emission feature weights. Please do not call this op with the emission feature being output of any nonlinear activation.
@@ -171,9 +171,10 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Alpha", emission_dims); ctx->SetOutputDim("EmissionExps", emission_dims); ctx->SetOutputDim("TransitionExps", transition_dims); - // (TODO caoying) This is tricky. The 1st dimension of Output(LogLikelihood) + // TODO(caoying) This is tricky. The 1st dimension of Output(LogLikelihood) // is the sequence number in a mini-batch. The dimension set here should be - // resized to its correct size in the function Compute. + // resized to its correct size in the function Compute. Fix this once we can + // get LoD information in the InferShape interface. ctx->SetOutputDim("LogLikelihood", {emission_dims[0], 1}); } @@ -236,7 +237,7 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { protected: // Explicitly set that the data type of output of the linear_chain_crf_grad - // operator is determined by its input: graidents of LogLikelihood. + // operator is determined by its input: gradients of LogLikelihood. framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { return framework::ToDataType( diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index e14672c78a..24c8b4052d 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -188,7 +188,6 @@ class LinearChainCRFOpKernel : public framework::OpKernel { const LoDTensor& src, LoDTensor* dst) { dst->mutable_data(src.dims(), platform::CPUPlace()); dst->CopyFrom(src, platform::CPUPlace(), ctx); - }; copyLoDTensor(ctx, emission_weights_src, emission_weights_dst); @@ -248,7 +247,7 @@ class LinearChainCRFOpKernel : public framework::OpKernel { for (size_t i = 0; i < tag_num; ++i) { T sum = 0.; for (size_t j = 0; j < tag_num; ++j) { - sum += alpha_value[(k - 1) * tag_num + j] * + sum += alpha_value[(k - 1) * tag_num + j] * // (*) w_exps[(j + state_trans_base_idx) * tag_num + i]; } alpha_value[k * tag_num + i] = x_exps[k * tag_num + i] * sum; @@ -291,7 +290,8 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { // These local variables hold the inputs and outputs, guaranteeing them on // CPU memory, to provide a consistent reference. // TODO(caoying) Fix this by moving all these local variables into the - // class's data members once we can profile the training process. + // class's data members once we can profile the training process, or + // implementing a real GPU kernel for CRF. Tensor* label = nullptr; Tensor label_tensor; Tensor* emission_exps = nullptr; @@ -344,6 +344,9 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { transition_grad = ctx.Output(framework::GradVarName("Transition")); } + + // TODO(caoying) Fix this constraint. When the Input(Emission) is from the + // data reader operator, it can have no gradients.
PADDLE_ENFORCE(emission_grad, "Output(Emission@Grad) should not be null."); emission_grad->mutable_data(platform::CPUPlace()); math::SetConstant()(ctx.device_context(), @@ -458,7 +461,7 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { for (size_t i = 0; i < tag_num; ++i) { T sum = 0.; for (size_t j = 0; j < tag_num; ++j) { - sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * + sum += w_exps[(i + state_trans_base_idx) * tag_num + j] * // (**) x_exps[(k + 1) * tag_num + j] * beta_value[(k + 1) * tag_num + j]; } @@ -493,7 +496,8 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { auto x_exps_mat = EigenMatrix::From(emission_exps); - // TODO(caoying): Fix this to avoid using this local variable. + // TODO(caoying): Fix this to avoid using this local variable if when can + // profiling the training process. Tensor tmp; tmp.mutable_data(beta->dims(), platform::CPUPlace()); auto tmp_mat = EigenMatrix::From(tmp); diff --git a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py index 1cc6dc1aaa..6f06a66c82 100644 --- a/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py +++ b/python/paddle/v2/framework/tests/test_linear_chain_crf_op.py @@ -83,6 +83,9 @@ class LinearChainCrfForward(object): class TestLinearChainCrfOp(OpTest): def set_test_data(self): + # TODO(caoying) Fix the unittest by adding the boundary cases when + # sequence lengths are 1, 2, and 3. + SEQ_NUM = 3 TAG_NUM = 17 MAX_SEQ_LEN = 5 From ebd992ec7923d7230bb33efa02e2d3544d514947 Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 31 Oct 2017 23:13:37 +0800 Subject: [PATCH 21/35] backpropagate the gradients the CRF operator receives. --- paddle/operators/linear_chain_crf_op.h | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/operators/linear_chain_crf_op.h index 24c8b4052d..56fb0c9102 100644 --- a/paddle/operators/linear_chain_crf_op.h +++ b/paddle/operators/linear_chain_crf_op.h @@ -35,6 +35,14 @@ static inline T NormalizeL1(T* x, size_t len) { return sum; } +template +struct ScalarMul { + explicit ScalarMul(const T& scalar) : scalar(scalar) {} + T operator()(const T& val) const { return val * scalar; } + + T scalar; +}; + using framework::LoDTensor; using framework::LoD; using framework::Tensor; @@ -349,8 +357,6 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { // data reader operator, it can have no gradients. PADDLE_ENFORCE(emission_grad, "Output(Emission@Grad) should not be null."); emission_grad->mutable_data(platform::CPUPlace()); - math::SetConstant()(ctx.device_context(), - emission_grad, 0.); if (transition_grad) { transition_grad->mutable_data(platform::CPUPlace()); math::SetConstant()(ctx.device_context(), @@ -480,15 +486,18 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { auto row_sum = prob.sum(Eigen::DSizes(1)) .reshape(Eigen::DSizes(seq_length, 1)) .broadcast(Eigen::DSizes(1, tag_num)); - x_grad_mat.device(*place) = prob / row_sum; + x_grad_mat.device(*place) = + (prob / row_sum).unaryExpr(ScalarMul(ll_grad)); for (size_t k = 0; k < seq_length; ++k) { - x_grad_mat(k, label_value[k]) -= static_cast(1.); + x_grad_mat(k, label_value[k]) -= static_cast(ll_grad); } if (transition_grad) { T* trans_grad = transition_grad->data(); for (size_t k = 0; k < tag_num; ++k) { + // Do not multiply by the output gradient here, because x_grad_mat has + // already done this.
trans_grad[k] += x_grad_mat(/*from start state*/ 0, k); trans_grad[tag_num + k] += x_grad_mat(/*to end state*/ seq_length - 1, k); @@ -496,8 +505,8 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { auto x_exps_mat = EigenMatrix::From(emission_exps); - // TODO(caoying): Fix this to avoid using this local variable if when can - // profiling the training process. + // TODO(caoying): Fix this to avoid using this local variable if we can + // profile the training process. Tensor tmp; tmp.mutable_data(beta->dims(), platform::CPUPlace()); auto tmp_mat = EigenMatrix::From(tmp); @@ -520,11 +529,11 @@ class LinearChainCRFGradOpKernel : public framework::OpKernel { for (size_t j = 0; j < tag_num; ++j) { trans_grad[(i + state_trans_base_idx) * tag_num + j] += sum * w_exps[(i + state_trans_base_idx) * tag_num + j] * - alpha_mat(k - 1, i) * tmp_mat(k, j); + alpha_mat(k - 1, i) * tmp_mat(k, j) * ll_grad; } } trans_grad[(label_value[k - 1] + state_trans_base_idx) * tag_num + - label_value[k]] -= static_cast(1.); + label_value[k]] -= static_cast(ll_grad); } } } From a75437a20c450cd88f3f900d3b82a11b9ffb7c37 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Oct 2017 10:06:44 -0700 Subject: [PATCH 22/35] fix bug (#5233) --- python/paddle/v2/dataset/imdb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index 93dd3e8f7d..cfc1c886e1 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -116,7 +116,7 @@ def reader_creator(pos_pattern, neg_pattern, word_idx, buffer_size): yield [word_idx.get(w, UNK) for w in doc], i % 2 doc = qs[i % 2].get() - return reader() + return reader def train(word_idx): From 9b70b6a1bbe641c64e6e42baa6d057346bf3306f Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 31 Oct 2017 10:11:35 -0700 Subject: [PATCH 23/35] Fix/sequence pool (#5229) * "modify layers.py" * "fix pool interface" * "add export type to layers" * "fix based on comment" --- python/paddle/v2/framework/layers.py | 75 +++++++++++++++------------- python/paddle/v2/framework/nets.py | 9 +--- 2 files changed, 43 insertions(+), 41 deletions(-) diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py index 6451d11e2b..5fdad52f21 100644 --- a/python/paddle/v2/framework/layers.py +++ b/python/paddle/v2/framework/layers.py @@ -5,7 +5,8 @@ import re __all__ = [ 'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat', - 'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'accuracy' + 'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim', + 'batch_norm', 'accuracy' ] @@ -165,18 +166,6 @@ _create_op_func_('dropout') _create_op_func_('reshape') -def cast(x, data_type, program=None): - helper = LayerHelper('cast', **locals()) - out = helper.create_tmp_variable(dtype=data_type) - helper.append_op( - type='cast', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'in_data_type': x.data_type, - 'out_data_type': out.data_type}) - return out - - def cast(x, data_type, program=None): helper = LayerHelper('cast', **locals()) out = helper.create_tmp_variable(dtype=data_type) @@ -191,9 +180,7 @@ def cast(x, data_type, program=None): def concat(input, axis, program=None, init_program=None): helper = LayerHelper('concat', **locals()) - if not isinstance(input, list) and not isinstance(input, tuple): - input = [input] - out = helper.create_tmp_variable(dtype=input[0].data_type) + out = 
helper.create_tmp_variable(dtype=helper.input_dtype()) helper.append_op( type='concat', inputs={'X': input}, @@ -202,6 +189,28 @@ def concat(input, axis, program=None, init_program=None): return out +def sums(input, program=None, init_program=None): + helper = LayerHelper('sum', **locals()) + out = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op(type='sum', inputs={'X': [input]}, outputs={'Out': out}) + return out + + +def cos_sim(X, Y, program=None, init_program=None): + helper = LayerHelper('cos_sim', **locals()) + out = helper.create_tmp_variable(dtype=helper.input_dtype("X")) + xnorm = helper.create_tmp_variable(dtype=helper.input_dtype("X")) + ynorm = helper.create_tmp_variable(dtype=helper.input_dtype("X")) + helper.append_op( + type='cos_sim', + inputs={'X': [X], + 'Y': [Y]}, + outputs={'Out': [out], + 'XNorm': [xnorm], + 'YNorm': [ynorm]}) + return out, xnorm, ynorm + + def cross_entropy(input, label, **kwargs): helper = LayerHelper('cross_entropy', **kwargs) out = helper.create_tmp_variable(dtype=input.data_type) @@ -254,9 +263,7 @@ def accuracy(input, label, k=1, **kwargs): def sequence_conv(input, num_filters, - name=None, filter_size=3, - act=None, stride=1, padding=None, bias_attr=None, @@ -270,7 +277,7 @@ def sequence_conv(input, helper = LayerHelper('sequence_conv', **locals()) dtype = helper.input_dtype() - filter_shape = [num_filters, filter_size] + filter_shape = [filter_size * input.shape[1], num_filters] filter = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) pre_bias = helper.create_tmp_variable(dtype) @@ -279,7 +286,7 @@ def sequence_conv(input, type='sequence_conv', inputs={ 'X': [input], - 'Filter': filter, + 'Filter': [filter], }, outputs={"Out": pre_bias}, attrs={ @@ -287,7 +294,6 @@ def sequence_conv(input, 'context_start': 0, 'context_length': filter_size }) - pre_act = helper.append_bias_op(pre_bias) return helper.append_activation(pre_act) @@ -344,31 +350,32 @@ def conv2d(input, return helper.append_activation(pre_act) -def sequence_pool(input, - pool_size, - pool_type, - pool_stride=1, - pool_padding=0, - global_pooling=False, - program=None, - init_program=None): +def sequence_pool(input, pool_type, program=None, init_program=None): # FIXME(dzh) : want to unify the argument of python layer # function. So we ignore some unecessary attributes - ENUM_POOL_TYPE = set(["max", "avg", "sqrt", "last", "first"]) - if pool_type not in ENUM_POOL_TYPE: + ENUM_POOL_TYPE = dict({ + "AVERAGE": 0, + "SUM": 1, + "SQRT": 2, + "MAX": 3, + "LAST": 4, + "FIRST": 5 + }) + if pool_type.upper() not in ENUM_POOL_TYPE: raise ValueError("Unknown pool_type: '%s'. 
It can only be %s.", - str(pool_type), " ".join(ENUM_POOL_TYPE)) + str(pool_type), " ".join(ENUM_POOL_TYPE.keys())) helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) + # FIXME(dzh): strategy helper.append_op( type="sequence_pool", inputs={"X": [input]}, - outputs={"Out": pool_out}, - attrs={"strategy": pool_type}) + outputs={"Out": [pool_out]}, + attrs={"strategy": ENUM_POOL_TYPE[pool_type.upper()]}) return pool_out diff --git a/python/paddle/v2/framework/nets.py b/python/paddle/v2/framework/nets.py index a9998073e1..8191b5ef44 100644 --- a/python/paddle/v2/framework/nets.py +++ b/python/paddle/v2/framework/nets.py @@ -101,24 +101,19 @@ def img_conv_group(input, def sequence_conv_pool(input, num_filters, filter_size, - pool_size, - pool_stride, - act, + pool_type="max", program=None, init_program=None): conv_out = layers.sequence_conv( input=input, num_filters=num_filters, filter_size=filter_size, - act=act, program=program, init_program=init_program) pool_out = layers.sequence_pool( input=conv_out, - pool_size=pool_size, - pool_type='max', - pool_stride=pool_stride, + pool_type=pool_type, program=program, init_program=init_program) return pool_out From 61eafbe09de00186fb8cb5eb2a46ab7135531efe Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Tue, 31 Oct 2017 10:40:57 -0700 Subject: [PATCH 24/35] Adding a framework for variable initializers (#5232) --- python/paddle/v2/framework/framework.py | 19 +-- python/paddle/v2/framework/initializer.py | 109 ++++++++++++++++++ python/paddle/v2/framework/layer_helper.py | 19 +-- python/paddle/v2/framework/layers.py | 26 ++--- .../tests/test_recognize_digits_mlp.py | 10 +- 5 files changed, 128 insertions(+), 55 deletions(-) create mode 100644 python/paddle/v2/framework/initializer.py diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py index f8d2f67410..b3493fc378 100644 --- a/python/paddle/v2/framework/framework.py +++ b/python/paddle/v2/framework/framework.py @@ -354,8 +354,8 @@ class Block(object): def create_var(self, *args, **kwargs): var = Variable(self, *args, **kwargs) - if 'init_attr' in kwargs: - self._prepend_initialize_ops_(var, kwargs['init_attr']) + if 'initializer' in kwargs: + kwargs['initializer'](var, self) return var def has_var(self, name): @@ -364,8 +364,8 @@ class Block(object): def create_parameter(self, *args, **kwargs): global_block = self.program.global_block() param = Parameter(global_block, *args, **kwargs) - if 'init_attr' in kwargs: - self._prepend_initialize_ops_(param, kwargs['init_attr']) + if 'initializer' in kwargs: + kwargs['initializer'](param, self) return param def append_op(self, *args, **kwargs): @@ -424,17 +424,6 @@ class Block(object): for index in range(len(self.ops)): assert self.ops[index].desc == ops_in_cpp[index] - def _prepend_initialize_ops_(self, param, init_attr): - op_type = init_attr['type'] - init_attr['shape'] = param.shape - init_attr['data_type'] = int(param.data_type) - op = self.prepend_op( - type=op_type, - inputs=None, - outputs={'Out': [param]}, - attrs=init_attr) - param.op = op - class Program(object): def __init__(self): diff --git a/python/paddle/v2/framework/initializer.py b/python/paddle/v2/framework/initializer.py new file mode 100644 index 0000000000..377d332713 --- /dev/null +++ b/python/paddle/v2/framework/initializer.py @@ -0,0 +1,109 @@ +import paddle.v2.framework.framework as framework + +__all__ = ['ConstantInitializer', 'UniformInitializer'] + + +class 
Initializer(object): + """Base class for variable initializers + + Defines the common interface of variable initializers. + They add operations to the init program that are used + to initialize variables. Users should not use this class + directly, but need to use one of its implementations. + """ + + def __init__(self): + pass + + def __call__(self, param, block): + """Add corresponding initialization operations to the network + """ + raise NotImplementedError() + + +class ConstantInitializer(Initializer): + """Implements the constant initializer + """ + + def __init__(self, value=0.0): + """Constructor for ConstantInitializer + + Args: + value: constant value to initialize the variable + """ + assert value is not None + super(ConstantInitializer, self).__init__() + self._value = value + + def __call__(self, var, block): + """Add constant initialization ops for a variable + + Args: + var: Variable that needs to be initialized + block: The block in which initialization ops + should be added + + Returns: + the initialization op + """ + assert isinstance(var, framework.Variable) + assert isinstance(block, framework.Block) + # Initialization Ops should be prepended and not appended + op = block.prepend_op( + type="fill_constant", + outputs={"Out": var}, + attrs={ + "shape": var.shape, + "data_type": int(var.data_type), + "value": self._value + }) + var.op = op + return op + + +class UniformInitializer(Initializer): + """Implements the random uniform distribution initializer + """ + + def __init__(self, low=-1.0, high=1.0, seed=0): + """Constructor for UniformInitializer + + Args: + low: lower boundary of the uniform distribution + high: upper boundary of the uniform distribution + seed: random seed + """ + assert low is not None + assert high is not None + assert seed is not None + super(UniformInitializer, self).__init__() + self._low = low + self._high = high + self._seed = seed + + def __call__(self, var, block): + """Add uniform distribution initialization ops for a variable + + Args: + var: Variable that needs to be initialized + block: The block in which initialization ops + should be added + + Returns: + the initialization op + """ + assert isinstance(var, framework.Variable) + assert isinstance(block, framework.Block) + # Initialization Ops should be prepended and not appended + op = block.prepend_op( + type="uniform_random", + outputs={"Out": var}, + attrs={ + "shape": var.shape, + "data_type": int(var.data_type), + "min": self._low, + "max": self._high, + "seed": self._seed + }) + var.op = op + return op diff --git a/python/paddle/v2/framework/layer_helper.py b/python/paddle/v2/framework/layer_helper.py index d96dbe172c..c57776441c 100644 --- a/python/paddle/v2/framework/layer_helper.py +++ b/python/paddle/v2/framework/layer_helper.py @@ -5,6 +5,8 @@ import paddle.v2.framework.core as core from paddle.v2.framework.framework import Variable, g_program, \ g_init_program +from paddle.v2.framework.initializer import ConstantInitializer, \ + UniformInitializer def unique_name(prefix): @@ -66,14 +68,7 @@ class LayerHelper(object): @property def param_attr(self): - default = { - 'name': None, - 'init_attr': { - 'type': 'uniform_random', - 'min': -1.0, - 'max': 1.0 - } - } + default = {'name': None, 'initializer': UniformInitializer()} actual = self.kwargs.get('param_attr', None) if actual is None: actual = default @@ -83,13 +78,7 @@ class LayerHelper(object): return actual def bias_attr(self): - default = { - 'name': None, - 'init_attr': { - 'type': 'fill_constant', - 'value': 0.0 - } - } +
default = {'name': None, 'initializer': ConstantInitializer()} bias_attr = self.kwargs.get('bias_attr', None) if bias_attr is True: bias_attr = default diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py index 5fdad52f21..dab72f0195 100644 --- a/python/paddle/v2/framework/layers.py +++ b/python/paddle/v2/framework/layers.py @@ -1,6 +1,7 @@ from paddle.v2.framework.layer_helper import LayerHelper, unique_name import paddle.v2.framework.core as core from paddle.v2.framework.framework import OpProtoHolder, Variable, Program +from paddle.v2.framework.initializer import ConstantInitializer import re __all__ = [ @@ -440,26 +441,12 @@ def batch_norm(input, else: raise ValueError("unsupported data layout:" + data_layout) - def get_init_attr(value): - if not isinstance(value, float): - raise ValueError("attr value should be a float") - return {'type': 'fill_constant', 'value': value} - - def prepend_init_op(var, init_attr): - assert isinstance(var, Variable) - op_type = init_attr['type'] - init_attr['shape'] = var.shape - init_attr['data_type'] = int(var.data_type) - op = var.block.prepend_op( - type=op_type, inputs=None, outputs={'Out': [var]}, attrs=init_attr) - return op - - def create_persistable_var(dtype, shape, init_attr=None): + def create_persistable_var(dtype, shape, initializer=None): name = unique_name(".".join([helper.name, "xxxx"])) var = init_program.global_block().create_var( dtype=dtype, shape=shape, name=name, persistable=True) - if 'init_attr' is not None: - prepend_init_op(var, init_attr) + if initializer is not None: + initializer(var, var.block) return program.global_block().create_var( name=name, dtype=dtype, shape=shape, persistable=True) @@ -472,8 +459,9 @@ def batch_norm(input, attr=helper.param_attr, shape=param_shape, dtype=dtype) # create input - mean = create_persistable_var(dtype, param_shape, get_init_attr(0.0)) - variance = create_persistable_var(dtype, param_shape, get_init_attr(1.0)) + mean = create_persistable_var(dtype, param_shape, ConstantInitializer(0.0)) + variance = create_persistable_var(dtype, param_shape, + ConstantInitializer(1.0)) # create output # mean and mean_out share the same memory diff --git a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py b/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py index a8a34b2a95..9916569d04 100644 --- a/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py +++ b/python/paddle/v2/framework/tests/test_recognize_digits_mlp.py @@ -3,9 +3,10 @@ import paddle.v2.framework.layers as layers import paddle.v2.framework.core as core import paddle.v2.framework.optimizer as optimizer -from paddle.v2.framework.framework import Program, g_program +from paddle.v2.framework.framework import Program from paddle.v2.framework.executor import Executor from paddle.v2.framework.regularizer import L2DecayRegularizer +from paddle.v2.framework.initializer import UniformInitializer import numpy as np @@ -21,11 +22,8 @@ image = layers.data( param_attr = { 'name': None, - 'init_attr': { - 'type': 'uniform_random', - 'min': -1.0, - 'max': 1.0 - }, + 'initializer': UniformInitializer( + low=-1.0, high=1.0), 'regularization': L2DecayRegularizer(0.0005 * BATCH_SIZE) } From 2e91c7da2bff114fd5c8219babbc3abb06a80095 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Wed, 1 Nov 2017 02:48:45 +0800 Subject: [PATCH 25/35] memory log level change from 3 to 10 (#5231) --- paddle/memory/detail/buddy_allocator.cc | 55 +++++++++++++------------ paddle/memory/detail/meta_cache.cc | 2 +- 
paddle/memory/memory.cc | 17 ++++---- 3 files changed, 38 insertions(+), 36 deletions(-) diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/memory/detail/buddy_allocator.cc index e212f7737a..64ee538038 100644 --- a/paddle/memory/detail/buddy_allocator.cc +++ b/paddle/memory/detail/buddy_allocator.cc @@ -27,11 +27,11 @@ BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator, system_allocator_(std::move(system_allocator)) {} BuddyAllocator::~BuddyAllocator() { - VLOG(3) << "BuddyAllocator Disconstructor makes sure that all of these " - "have actually been freed"; + VLOG(10) << "BuddyAllocator Disconstructor makes sure that all of these " + "have actually been freed"; while (!pool_.empty()) { auto block = static_cast(std::get<2>(*pool_.begin())); - VLOG(3) << "Free from block (" << block << ", " << max_chunk_size_ << ")"; + VLOG(10) << "Free from block (" << block << ", " << max_chunk_size_ << ")"; system_allocator_->Free(block, max_chunk_size_, block->index(cache_)); cache_.invalidate(block); @@ -51,11 +51,12 @@ void* BuddyAllocator::Alloc(size_t unaligned_size) { // acquire the allocator lock std::lock_guard lock(mutex_); - VLOG(3) << "Allocate " << unaligned_size << " bytes from chunk size " << size; + VLOG(10) << "Allocate " << unaligned_size << " bytes from chunk size " + << size; // if the allocation is huge, send directly to the system allocator if (size > max_chunk_size_) { - VLOG(3) << "Allocate from system allocator."; + VLOG(10) << "Allocate from system allocator."; return SystemAlloc(size); } @@ -70,9 +71,9 @@ void* BuddyAllocator::Alloc(size_t unaligned_size) { return nullptr; } } else { - VLOG(3) << "Allocation from existing memory block " << std::get<2>(*it) - << " at address " - << reinterpret_cast(std::get<2>(*it))->data(); + VLOG(10) << "Allocation from existing memory block " << std::get<2>(*it) + << " at address " + << reinterpret_cast(std::get<2>(*it))->data(); } total_used_ += size; @@ -89,10 +90,10 @@ void BuddyAllocator::Free(void* p) { // Acquire the allocator lock std::lock_guard lock(mutex_); - VLOG(3) << "Free from address " << block; + VLOG(10) << "Free from address " << block; if (block->type(cache_) == MemoryBlock::HUGE_CHUNK) { - VLOG(3) << "Free directly from system allocator"; + VLOG(10) << "Free directly from system allocator"; system_allocator_->Free(block, block->total_size(cache_), block->index(cache_)); @@ -109,8 +110,8 @@ void BuddyAllocator::Free(void* p) { // Trying to merge the right buddy if (block->has_right_buddy(cache_)) { - VLOG(3) << "Merging this block " << block << " with its right buddy " - << block->right_buddy(cache_); + VLOG(10) << "Merging this block " << block << " with its right buddy " + << block->right_buddy(cache_); auto right_buddy = block->right_buddy(cache_); @@ -127,8 +128,8 @@ void BuddyAllocator::Free(void* p) { // Trying to merge the left buddy if (block->has_left_buddy(cache_)) { - VLOG(3) << "Merging this block " << block << " with its left buddy " - << block->left_buddy(cache_); + VLOG(10) << "Merging this block " << block << " with its left buddy " + << block->left_buddy(cache_); auto left_buddy = block->left_buddy(cache_); @@ -144,8 +145,8 @@ void BuddyAllocator::Free(void* p) { } // Dumping this block into pool - VLOG(3) << "Inserting free block (" << block << ", " - << block->total_size(cache_) << ")"; + VLOG(10) << "Inserting free block (" << block << ", " + << block->total_size(cache_) << ")"; pool_.insert( IndexSizeAddress(block->index(cache_), block->total_size(cache_), block)); @@ -164,7 
+165,7 @@ void* BuddyAllocator::SystemAlloc(size_t size) { size_t index = 0; void* p = system_allocator_->Alloc(index, size); - VLOG(3) << "Allocated " << p << " from system allocator."; + VLOG(10) << "Allocated " << p << " from system allocator."; if (p == nullptr) return nullptr; @@ -190,8 +191,8 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() { if (p == nullptr) return pool_.end(); - VLOG(3) << "Creating and inserting new block " << p - << " from system allocator"; + VLOG(10) << "Creating and inserting new block " << p + << " from system allocator"; static_cast(p)->init(cache_, MemoryBlock::FREE_CHUNK, index, max_chunk_size_, nullptr, nullptr); @@ -235,19 +236,19 @@ void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it, auto block = static_cast(std::get<2>(*it)); pool_.erase(it); - VLOG(3) << "Split block (" << block << ", " << block->total_size(cache_) - << ") into"; + VLOG(10) << "Split block (" << block << ", " << block->total_size(cache_) + << ") into"; block->split(cache_, size); - VLOG(3) << "Left block (" << block << ", " << block->total_size(cache_) - << ")"; + VLOG(10) << "Left block (" << block << ", " << block->total_size(cache_) + << ")"; block->set_type(cache_, MemoryBlock::ARENA_CHUNK); // the rest of memory if exist if (block->has_right_buddy(cache_)) { if (block->right_buddy(cache_)->type(cache_) == MemoryBlock::FREE_CHUNK) { - VLOG(3) << "Insert right block (" << block->right_buddy(cache_) << ", " - << block->right_buddy(cache_)->total_size(cache_) << ")"; + VLOG(10) << "Insert right block (" << block->right_buddy(cache_) << ", " + << block->right_buddy(cache_)->total_size(cache_) << ")"; pool_.insert( IndexSizeAddress(block->right_buddy(cache_)->index(cache_), @@ -274,7 +275,7 @@ void BuddyAllocator::CleanIdleFallBackAlloc() { return; } - VLOG(3) << "Return block " << block << " to fallback allocator."; + VLOG(10) << "Return block " << block << " to fallback allocator."; system_allocator_->Free(block, max_chunk_size_, block->index(cache_)); cache_.invalidate(block); @@ -310,7 +311,7 @@ void BuddyAllocator::CleanIdleNormalAlloc() { MemoryBlock* block = static_cast(std::get<2>(*pool)); - VLOG(3) << "Return block " << block << " to base allocator."; + VLOG(10) << "Return block " << block << " to base allocator."; system_allocator_->Free(block, max_chunk_size_, block->index(cache_)); cache_.invalidate(block); diff --git a/paddle/memory/detail/meta_cache.cc b/paddle/memory/detail/meta_cache.cc index f0721c3b94..7e2f92b00c 100644 --- a/paddle/memory/detail/meta_cache.cc +++ b/paddle/memory/detail/meta_cache.cc @@ -30,7 +30,7 @@ Metadata MetadataCache::load(const MemoryBlock* block) { return existing_metadata->second; } else { auto* meta = reinterpret_cast(block); - VLOG(3) << "Load MetaData type=" << meta->type; + VLOG(10) << "Load MetaData type=" << meta->type; PADDLE_ASSERT(meta->check_guards()); return *reinterpret_cast(block); } diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 0b648642f9..5eb1c44eb6 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -39,15 +39,15 @@ BuddyAllocator* GetCPUBuddyAllocator() { template <> void* Alloc(platform::CPUPlace place, size_t size) { - VLOG(3) << "Allocate " << size << " bytes on " << platform::Place(place); + VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); void* p = GetCPUBuddyAllocator()->Alloc(size); - VLOG(3) << " pointer=" << p; + VLOG(10) << " pointer=" << p; return p; } template <> void Free(platform::CPUPlace place, void* p) { 
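// Illustrative aside (hypothetical, not part of this commit): the
// split/merge logic logged above follows the classic buddy scheme: a chunk
// of size 2s splits into two buddies of size s whose offsets differ only in
// the bit equal to s, so a block's buddy offset is offset XOR size. The
// real allocator tracks buddies through block metadata; this XOR arithmetic
// is just the underlying invariant, shown standalone:
#include <cassert>
#include <cstddef>

static std::size_t BuddyOf(std::size_t offset, std::size_t size) {
  return offset ^ size;  // valid when size is a power of two and
}                        // offset is aligned to size

int main() {
  assert(BuddyOf(0, 128) == 128);    // right buddy of the left half
  assert(BuddyOf(128, 128) == 0);    // left buddy of the right half
  assert(BuddyOf(256, 128) == 384);  // buddies pair up within each 2*size
  return 0;
}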
- VLOG(3) << "Free pointer=" << p << " on " << platform::Place(place); + VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); GetCPUBuddyAllocator()->Free(p); } @@ -69,11 +69,12 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()); } - VLOG(3) << "\n\nNOTE: each GPU device use " - << FLAGS_fraction_of_gpu_memory_to_use * 100 << "% of GPU memory.\n" - << "You can set environment variable '" - << platform::kEnvFractionGpuMemoryToUse - << "' to change the fraction of GPU usage.\n\n"; + VLOG(10) << "\n\nNOTE: each GPU device use " + << FLAGS_fraction_of_gpu_memory_to_use * 100 + << "% of GPU memory.\n" + << "You can set environment variable '" + << platform::kEnvFractionGpuMemoryToUse + << "' to change the fraction of GPU usage.\n\n"; } platform::SetDeviceId(gpu_id); return as[gpu_id]; From 0b76c7352c18fce3c89cd32021d296701da9867a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 31 Oct 2017 12:03:07 -0700 Subject: [PATCH 26/35] AddBiasOp does not care num_flatten_dims (#5200) * AddBiasOp does not care num_flatten_dims * Add comments --- python/paddle/v2/framework/layer_helper.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/layer_helper.py b/python/paddle/v2/framework/layer_helper.py index c57776441c..45d9cf3f48 100644 --- a/python/paddle/v2/framework/layer_helper.py +++ b/python/paddle/v2/framework/layer_helper.py @@ -142,8 +142,24 @@ class LayerHelper(object): return self.program.global_block().create_var( *args, persistable=False, **kwargs) - def append_bias_op(self, input_var): - size = list(input_var.shape[1:]) + def append_bias_op(self, input_var, num_flatten_dims=None): + """ + Append bias operator and return its output. If the user does not set + bias_attr, append_bias_op will return input_var + + :param input_var: the input variable. The len(input_var.shape) is larger + or equal than 2. + :param num_flatten_dims: The input tensor will be flatten as a matrix + when adding bias. 
+        `matrix.shape = product(input_var.shape[0:num_flatten_dims]), product(
+        input_var.shape[num_flatten_dims:])`
+        """
+        if num_flatten_dims is None:
+            num_flatten_dims = self.kwargs.get('num_flatten_dims', None)
+            if num_flatten_dims is None:
+                num_flatten_dims = 1
+
+        size = list(input_var.shape[num_flatten_dims:])
         bias_attr = self.bias_attr()
         if not bias_attr:
             return input_var

From db3b9438b7d273198dda76f6b30ab5bb678d2778 Mon Sep 17 00:00:00 2001
From: Abhinav Arora
Date: Tue, 31 Oct 2017 13:28:48 -0700
Subject: [PATCH 27/35] Adding Normal distribution initializer and unit tests
 for python initializers (#5256)

---
 paddle/operators/gaussian_random_op.cc     |  12 +-
 python/paddle/v2/framework/initializer.py  |  51 +++++++-
 .../tests/test_gaussian_random_op.py       |   2 +-
 .../v2/framework/tests/test_initializer.py | 120 ++++++++++++++++++
 4 files changed, 177 insertions(+), 8 deletions(-)
 create mode 100644 python/paddle/v2/framework/tests/test_initializer.py

diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index 04dfdf7c48..be7f542a7a 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -45,14 +45,14 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of GaussianRandomOp should not be null.");
-    auto dims = ctx->Attrs().Get<std::vector<int>>("dims");
+    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
     std::vector<int64_t> temp;
-    temp.reserve(dims.size());
-    for (auto dim : dims) {
+    temp.reserve(shape.size());
+    for (auto dim : shape) {
       temp.push_back(static_cast<int64_t>(dim));
     }
-    PADDLE_ENFORCE(dims.size() > 0UL,
-                   "dims can be one int or array. dims must be set.");
+    PADDLE_ENFORCE(shape.size() > 0UL,
+                   "shape can be one int or array. shape must be set.");
     ctx->SetOutputDim("Out", framework::make_ddim(temp));
   }

@@ -74,7 +74,7 @@
 GaussianRandom operator.

 Use to initialize tensor with gaussian random generator.
)DOC"); - AddAttr>("dims", "The dimension of random tensor."); + AddAttr>("shape", "The dimension of random tensor."); AddAttr("mean", "mean of random tensor.").SetDefault(.0f); AddAttr("std", "std of random tensor.").SetDefault(1.0f); AddAttr("seed", diff --git a/python/paddle/v2/framework/initializer.py b/python/paddle/v2/framework/initializer.py index 377d332713..507fd16062 100644 --- a/python/paddle/v2/framework/initializer.py +++ b/python/paddle/v2/framework/initializer.py @@ -62,7 +62,7 @@ class ConstantInitializer(Initializer): class UniformInitializer(Initializer): - """Implements for random uniform distribution initializer + """Implements the random uniform distribution initializer """ def __init__(self, low=-1.0, high=1.0, seed=0): @@ -75,6 +75,7 @@ class UniformInitializer(Initializer): """ assert low is not None assert high is not None + assert high >= low assert seed is not None super(UniformInitializer, self).__init__() self._low = low @@ -107,3 +108,51 @@ class UniformInitializer(Initializer): }) var.op = op return op + + +class NormalInitializer(Initializer): + """Implements the random Normal(Gaussian) distribution initializer + """ + + def __init__(self, loc=0.0, scale=1.0, seed=0): + """Constructor for NormalInitializer + + Args: + loc: mean of the normal distribution + scale: standard deviation of the normal distribution + seed: random seed + """ + assert loc is not None + assert scale is not None + assert seed is not None + super(NormalInitializer, self).__init__() + self._mean = loc + self._std_dev = scale + self._seed = seed + + def __call__(self, var, block): + """Add normal distribution initialization ops for a variable + + Args: + var: Variable that needs to be initialized + block: The block in which initialization ops + should be added + + Returns: + the initialization op + """ + assert isinstance(var, framework.Variable) + assert isinstance(block, framework.Block) + # Initialization Ops should be prepended and not appended + op = block.prepend_op( + type="gaussian_random", + outputs={"Out": var}, + attrs={ + "shape": var.shape, + "data_type": int(var.data_type), + "mean": self._mean, + "std": self._std_dev, + "seed": self._seed + }) + var.op = op + return op diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index 8b7779667d..0dc7e091a5 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -19,7 +19,7 @@ class TestGaussianRandomOp(unittest.TestCase): op = Operator( "gaussian_random", Out='Out', - dims=[1000, 784], + shape=[1000, 784], mean=.0, std=1., seed=10) diff --git a/python/paddle/v2/framework/tests/test_initializer.py b/python/paddle/v2/framework/tests/test_initializer.py new file mode 100644 index 0000000000..f28fc8a86c --- /dev/null +++ b/python/paddle/v2/framework/tests/test_initializer.py @@ -0,0 +1,120 @@ +import unittest + +import paddle.v2.framework.framework as framework +import paddle.v2.framework.initializer as initializer + +DELTA = 0.00001 + + +class TestConstantInitializer(unittest.TestCase): + def test_constant_initializer_default_value(self): + """Test the constant initializer with default value + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.ConstantInitializer()) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + 
self.assertEqual(init_op.type, 'fill_constant') + self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA) + + def test_constant_initializer(self): + """Test constant initializer with supplied value + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.ConstantInitializer(2.3)) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'fill_constant') + self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA) + + +class TestUniformInitializer(unittest.TestCase): + def test_uniform_initializer_default_value(self): + """Test the uniform initializer with default value + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer()) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'uniform_random') + self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA) + self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA) + self.assertEqual(init_op.attr('seed'), 0) + + def test_uniform_initializer(self): + """Test uniform initializer with supplied attributes + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(-4.2, 3.1, 123)) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'uniform_random') + self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA) + self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA) + self.assertEqual(init_op.attr('seed'), 123) + + +class TestNormalInitializer(unittest.TestCase): + def test_normal_initializer_default_value(self): + """Test the normal initializer with default value + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer()) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'gaussian_random') + self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA) + self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA) + self.assertEqual(init_op.attr('seed'), 0) + + def test_normal_initializer(self): + """Test normal initializer with supplied attributes + """ + program = framework.Program() + block = program.global_block() + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer(2.3, 1.9, 123)) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'gaussian_random') + self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA) + self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA) + self.assertEqual(init_op.attr('seed'), 123) + + +if __name__ == '__main__': + unittest.main() From 9074a60c510cd9e64ebf0c7139a6531997ac1651 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Oct 2017 13:36:51 -0700 Subject: [PATCH 28/35] Refine lookup_table_op (#5257) 1. Change some `auto` to `auto*` 2. 
Change `Tensor` to `LoDTensor` --- paddle/operators/lookup_table_op.cc | 4 ++-- paddle/operators/lookup_table_op.cu | 24 ++++++++++++------------ paddle/operators/lookup_table_op.h | 28 ++++++++++++++-------------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 8fdd42352e..0b361e20f2 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -43,7 +43,7 @@ class LookupTableOp : public framework::OperatorWithKernel { protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("W")->type()); + return framework::ToDataType(ctx.Input("W")->type()); } }; @@ -93,7 +93,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { protected: framework::DataType IndicateDataType( const framework::ExecutionContext& ctx) const override { - return framework::ToDataType(ctx.Input("W")->type()); + return framework::ToDataType(ctx.Input("W")->type()); } }; diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 837b2a1f4c..2c826872be 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -61,16 +61,16 @@ template class LookupTableCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto table_t = context.Input("W"); - auto ids_t = context.Input("Ids"); - auto output_t = context.Output("Out"); + auto* table_t = context.Input("W"); + auto* ids_t = context.Input("Ids"); + auto* output_t = context.Output("Out"); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; size_t K = ids_t->numel(); - auto ids = ids_t->data(); - auto table = table_t->data(); - auto output = output_t->mutable_data(context.GetPlace()); + auto* ids = ids_t->data(); + auto* table = table_t->data(); + auto* output = output_t->mutable_data(context.GetPlace()); dim3 threads(128, 8); dim3 grids(8, 1); @@ -87,9 +87,9 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { bool is_sparse = context.Attr("is_sparse"); if (is_sparse) { - auto* ids = context.Input("Ids"); - auto* table = context.Input("W"); - auto* d_output = context.Input(framework::GradVarName("Out")); + auto* ids = context.Input("Ids"); + auto* table = context.Input("W"); + auto* d_output = context.Input(framework::GradVarName("Out")); auto* d_table = context.Output(framework::GradVarName("W")); auto* ids_data = ids->data(); @@ -119,9 +119,9 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { d_output->numel(), stream); } else { - auto ids_t = context.Input("Ids"); - auto d_output_t = context.Input(framework::GradVarName("Out")); - auto d_table_t = context.Output(framework::GradVarName("W")); + auto ids_t = context.Input("Ids"); + auto d_output_t = context.Input(framework::GradVarName("Out")); + auto d_table_t = context.Output(framework::GradVarName("W")); int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index 54067cd01d..ea3289d273 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -19,22 +19,22 @@ namespace paddle { namespace operators { -using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; template 
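// Illustrative aside (hypothetical, not part of this commit): the
// `auto` -> `auto*` changes in the kernels below are about readability and
// safety: both spellings deduce the same pointer type, but `auto*`
// documents pointer-ness at the call site and refuses a non-pointer
// initializer. Standalone illustration:
#include <vector>

int main() {
  std::vector<int> v{1, 2, 3};
  auto p1 = v.data();    // deduces int*, but the pointer-ness is implicit
  auto* p2 = v.data();   // same type; intent is visible at the call site
  // auto* p3 = v.size();  // would not compile: initializer is not a pointer
  return (p1 == p2) ? 0 : 1;
}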
class LookupTableKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto table_t = context.Input("W"); // float tensor - auto ids_t = context.Input("Ids"); // int tensor - auto output_t = context.Output("Out"); // float tensor + auto* table_t = context.Input("W"); // float tensor + auto* ids_t = context.Input("Ids"); // int tensor + auto* output_t = context.Output("Out"); // float tensor int N = table_t->dims()[0]; int D = table_t->dims()[1]; - auto ids = ids_t->data(); - auto table = table_t->data(); - auto output = output_t->mutable_data(context.GetPlace()); + auto* ids = ids_t->data(); + auto* table = table_t->data(); + auto* output = output_t->mutable_data(context.GetPlace()); for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); @@ -49,9 +49,9 @@ class LookupTableGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { bool is_sparse = context.Attr("is_sparse"); if (is_sparse) { - auto* ids = context.Input("Ids"); - auto* table = context.Input("W"); - auto* d_output = context.Input(framework::GradVarName("Out")); + auto* ids = context.Input("Ids"); + auto* table = context.Input("W"); + auto* d_output = context.Input(framework::GradVarName("Out")); auto* d_table = context.Output(framework::GradVarName("W")); auto* ids_data = ids->data(); @@ -76,10 +76,10 @@ class LookupTableGradKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { - auto* ids = context.Input("Ids"); - auto* d_output = context.Input(framework::GradVarName("Out")); - auto* d_table = context.Output(framework::GradVarName("W")); - auto* table = context.Input("W"); + auto* ids = context.Input("Ids"); + auto* d_output = context.Input(framework::GradVarName("Out")); + auto* d_table = context.Output(framework::GradVarName("W")); + auto* table = context.Input("W"); auto* ids_data = ids->data(); auto ids_dim = ids->dims(); From 360cb18321b8401916cb9c50cb123bdb3ac2d94b Mon Sep 17 00:00:00 2001 From: QI JUN Date: Tue, 31 Oct 2017 13:39:47 -0700 Subject: [PATCH 29/35] fix bug in lookup table grad operator (#5228) --- paddle/operators/lookup_table_op.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 2c826872be..c7ba172066 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -116,7 +116,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { auto* d_output_data = d_output->data(); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, - d_output->numel(), stream); + d_output->numel() * sizeof(T), stream); } else { auto ids_t = context.Input("Ids"); From ee11f00642afe00cfc14346d5c4791efa3405802 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Wed, 1 Nov 2017 05:24:04 +0800 Subject: [PATCH 30/35] add shareLod (#5259) * add shareLod * fix sequence_conv grad infershape --- paddle/framework/op_desc.cc | 16 ++++++++++++++++ paddle/framework/operator.cc | 14 ++++++++++++++ paddle/framework/shape_inference.cc | 3 --- paddle/framework/shape_inference.h | 5 ++--- paddle/operators/sequence_conv_op.cc | 2 +- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc 
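// Illustrative aside (hypothetical, not part of this commit): LoD ("level
// of details") stores sequence boundaries alongside tensor data; offsets
// {0, 2, 5, 6} describe a batch of three sequences with lengths 2, 3 and 1.
// The ShareLoD helper added below simply copies this partition from an
// input to an output whose rows correspond one-to-one, instead of
// recomputing it. Toy illustration, not the Paddle API:
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<std::size_t> in_lod{0, 2, 5, 6};  // three sequences
  std::vector<std::size_t> out_lod = in_lod;    // "ShareLoD": same cuts
  assert(out_lod.size() == 4 && out_lod.back() == 6);  // 6 rows in total
  return 0;
}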
index c2d6f124ad..a4747e7c7c 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -52,6 +52,22 @@ class CompileTimeInferShapeContext : public InferShapeContext { const std::vector &Outputs( const std::string &name) const override; + void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, + size_t j = 0) const override { + PADDLE_ENFORCE_LT(i, Inputs(in).size()); + PADDLE_ENFORCE_LT(j, Outputs(out).size()); + auto *in_var = block_.FindVarRecursive(Inputs(in)[i]); + auto *out_var = block_.FindVarRecursive(Outputs(out)[j]); + if (in_var->GetType() != VarDesc::LOD_TENSOR) { + VLOG(3) << "input " << in << "is not LodTensor"; + return; + } + PADDLE_ENFORCE_EQ(in_var->GetType(), VarDesc::LOD_TENSOR, + "The %d-th output of Output(%s) must be LoDTensor.", j, + out); + in_var->SetLoDLevel(out_var->GetLodLevel()); + } + private: DDim GetDim(const std::string &name) const override; diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 222a252dc4..aa46829fdd 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -351,6 +351,20 @@ class RuntimeInferShapeContext : public InferShapeContext { return op_.Outputs(name); } + void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, + size_t j = 0) const override { + PADDLE_ENFORCE_LT(i, Inputs(in).size()); + PADDLE_ENFORCE_LT(j, Outputs(out).size()); + Variable* in_var = scope_.FindVar(Inputs(in)[i]); + Variable* out_var = scope_.FindVar(Outputs(out)[j]); + if (!in_var->IsType()) return; + PADDLE_ENFORCE(out_var->IsType(), + "The %d-th output of Output(%s) must be LoDTensor.", j, out); + auto in_tensor = in_var->Get(); + auto* out_tensor = out_var->GetMutable(); + out_tensor->set_lod(in_tensor.lod()); + } + private: DDim GetDim(const std::string& name) const override { Variable* var = scope_.FindVar(name); diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 33a1d0b9b2..8169df8e46 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -28,9 +28,6 @@ void InferShapeContext::SetOutputsDim( SetDims(names, dims); } -void InferShapeContext::ShareLoD(const std::string &in, const std::string &out, - size_t i, size_t j) const {} - std::vector InferShapeContext::GetDims( const std::vector &names) const { std::vector ret; diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index f1f1e44bcc..6f19900ef1 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -43,9 +43,8 @@ class InferShapeContext { virtual const std::vector &Outputs( const std::string &name) const = 0; - // TODO(qiao) implement this function - void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, - size_t j = 0) const; + virtual void ShareLoD(const std::string &in, const std::string &out, + size_t i = 0, size_t j = 0) const = 0; protected: virtual framework::DDim GetDim(const std::string &name) const = 0; diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/operators/sequence_conv_op.cc index bdb52265a5..a3f2ed1443 100644 --- a/paddle/operators/sequence_conv_op.cc +++ b/paddle/operators/sequence_conv_op.cc @@ -89,7 +89,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel { } if (ctx->HasOutput(framework::GradVarName("X"))) { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); - ctx->ShareLoD(framework::GradVarName("X"), "X"); + ctx->ShareLoD("X", framework::GradVarName("X")); } if 
(ctx->HasOutput(framework::GradVarName("Filter"))) { ctx->SetOutputDim(framework::GradVarName("Filter"), From 1363ddb6d724a19880b55cbefc0e62819a25a7d5 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 31 Oct 2017 14:37:00 -0700 Subject: [PATCH 31/35] Feature/executor use program bind (#5196) * Init commit * Make executor use ProgramDescBind * Change Attribute from BlockDesc to BlockDescBind * Since we will get the program desc in RNN, just BlockDesc is not enough. --- paddle/framework/attribute.cc | 10 ++---- paddle/framework/attribute.h | 2 +- paddle/framework/backward.cc | 6 ++-- paddle/framework/backward_test.cc | 14 ++++---- paddle/framework/block_desc.cc | 2 +- paddle/framework/executor.cc | 27 +++++++------- paddle/framework/executor.h | 4 +-- paddle/framework/op_desc.cc | 12 ++++--- paddle/framework/op_registry.cc | 8 +++-- paddle/framework/op_registry.h | 3 +- paddle/framework/op_registry_test.cc | 12 +++---- paddle/framework/operator_test.cc | 6 ++-- paddle/framework/program_desc.h | 4 ++- paddle/framework/program_desc_test.cc | 8 ++--- paddle/framework/prune_test.cc | 10 +++--- paddle/framework/type_defs.h | 2 +- paddle/framework/var_type_inference_test.cc | 36 ++++++++++--------- paddle/operators/dynamic_recurrent_op_test.cc | 2 +- paddle/pybind/protobuf.cc | 3 +- paddle/pybind/pybind.cc | 15 ++++---- 20 files changed, 94 insertions(+), 92 deletions(-) diff --git a/paddle/framework/attribute.cc b/paddle/framework/attribute.cc index 29fe352ca4..b1e1793641 100644 --- a/paddle/framework/attribute.cc +++ b/paddle/framework/attribute.cc @@ -19,7 +19,7 @@ limitations under the License. */ namespace paddle { namespace framework { -Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* program) { +Attribute GetAttrValue(const OpDesc::Attr& attr_desc) { switch (attr_desc.type()) { case framework::AttrType::BOOLEAN: { return attr_desc.b(); @@ -61,13 +61,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* program) { } return val; } - case framework::AttrType::BLOCK: { - PADDLE_ENFORCE(program != nullptr, - "Need to specify ProgramDesc when get a block attr"); - return program->mutable_blocks(attr_desc.block_idx()); - } + default: + PADDLE_THROW("Unsupport attr type %d", attr_desc.type()); } - PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !"); return boost::blank(); } diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h index 9744662b8f..0641907d6f 100644 --- a/paddle/framework/attribute.h +++ b/paddle/framework/attribute.h @@ -32,7 +32,7 @@ inline AttrType AttrTypeID() { return static_cast(tmp.which() - 1); } -Attribute GetAttrValue(const OpDesc::Attr& attr_desc, ProgramDesc* desc); +Attribute GetAttrValue(const OpDesc::Attr& attr_desc); class AttrReader { public: diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 150c152367..9759bb2cf9 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -368,7 +368,7 @@ std::vector> MakeBlockBackward( ProgramDescBind& program_desc, int block_idx, std::unordered_set* no_grad_vars, std::unordered_map* grad_to_var) { - BlockDescBind* cur_block = program_desc.Block(block_idx); + BlockDescBind* cur_block = program_desc.MutableBlock(block_idx); std::vector op_descs = cur_block->AllOps(); std::unordered_map> dup_out_ops; size_t grad_desc_idx = 0; @@ -443,7 +443,7 @@ ParamGradInfoMap AppendBackward( } const int root_block_idx = 0; - auto root_block = program_desc.Block(root_block_idx); + auto root_block = program_desc.MutableBlock(root_block_idx); 
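// Illustrative aside (hypothetical, not part of this commit): backward
// generation names every gradient after its forward variable plus a
// "@GRAD" suffix; patch 32 later in this series adds FwdName() to invert
// that mapping so a gradient can inherit its forward variable's data type.
// A minimal sketch of the convention (framework::GradVarName is the real
// helper):
#include <cassert>
#include <string>

static std::string GradName(const std::string& fwd) { return fwd + "@GRAD"; }

static std::string FwdName(const std::string& grad) {
  auto pos = grad.find("@GRAD");
  return pos == std::string::npos ? std::string() : grad.substr(0, pos);
}

int main() {
  assert(GradName("fc_0.w") == "fc_0.w@GRAD");
  assert(FwdName("fc_0.w@GRAD") == "fc_0.w");
  assert(FwdName("fc_0.w").empty());  // not a gradient name
  return 0;
}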
// insert fill one op for target // TODO(qiao) add some check to the target. @@ -492,7 +492,7 @@ ParamGradInfoMap AppendBackward( CreateGradVarInBlock(forward_op_num, grad_to_var, root_block, &retv); for (size_t block_index = forward_block_num; block_index < program_desc.Size(); ++block_index) { - CreateGradVarInBlock(0, grad_to_var, program_desc.Block(block_index), + CreateGradVarInBlock(0, grad_to_var, program_desc.MutableBlock(block_index), &retv); } return retv; diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 421f132194..4e8d630c26 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -499,7 +499,7 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { TEST(Backward, simple_single_op) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op = block->AppendOp(); op->SetType("rowwise_add"); @@ -535,7 +535,7 @@ TEST(Backward, simple_single_op) { TEST(Backward, default_attribute) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op = block->AppendOp(); op->SetType("mul"); op->SetInput("X", {"x"}); @@ -561,7 +561,7 @@ TEST(Backward, default_attribute) { TEST(Backward, simple_mult_op) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); @@ -644,7 +644,7 @@ TEST(Backward, simple_mult_op) { TEST(Backward, intermedia_var_no_grad) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); @@ -714,7 +714,7 @@ TEST(Backward, intermedia_var_no_grad) { TEST(Backward, var_no_grad) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("mult_in_out"); op1->SetInput("X", {"x1"}); @@ -790,7 +790,7 @@ TEST(Backward, var_no_grad) { TEST(Backward, shared_var) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); f::OpDescBind *op1 = block->AppendOp(); op1->SetType("rowwise_add"); op1->SetInput("X", {"x1"}); @@ -880,7 +880,7 @@ TEST(Backward, shared_var) { TEST(Backward, half_backward) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); auto *op1 = block->AppendOp(); op1->SetType("minus"); op1->SetInput("X", {"a"}); diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index b73a20cc89..9e3d597f3a 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -113,7 +113,7 @@ BlockDescBind *BlockDescBind::ParentBlock() const { if (this->desc_->parent_idx() == kNoneBlockIndex) { return nullptr; } - return prog_->Block(static_cast(this->desc_->parent_idx())); + return prog_->MutableBlock(static_cast(this->desc_->parent_idx())); } BlockDesc *BlockDescBind::Proto() { diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 3e9d8b3084..9bf2311dc8 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -73,33 +73,32 @@ static void 
CreateTensor(Variable* var, VarDesc::VarType var_type) { } } -void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) { +void Executor::Run(const ProgramDescBind& pdesc, Scope* scope, int block_id) { // TODO(tonyyang-svail): // - only runs on the first device (i.e. no interdevice communication) // - will change to use multiple blocks for RNN op and Cond Op - PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id); - auto& block = pdesc.blocks(block_id); + PADDLE_ENFORCE_LT(block_id, pdesc.Size()); + auto& block = pdesc.Block(block_id); auto& device = device_contexts_[0]; Scope& local_scope = scope->NewScope(); - for (auto& var : block.vars()) { - if (var.persistable()) { - auto* ptr = scope->Var(var.name()); - CreateTensor(ptr, var.type()); - VLOG(3) << "Create Variable " << var.name() + for (auto& var : block.AllVars()) { + if (var->Persistable()) { + auto* ptr = scope->Var(var->Name()); + CreateTensor(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() << " global, which pointer is " << ptr; } else { - auto* ptr = local_scope.Var(var.name()); - CreateTensor(ptr, var.type()); - VLOG(3) << "Create Variable " << var.name() + auto* ptr = local_scope.Var(var->Name()); + CreateTensor(ptr, var->GetType()); + VLOG(3) << "Create Variable " << var->Name() << " locally, which pointer is " << ptr; } } - for (auto& op_desc : block.ops()) { - auto op = paddle::framework::OpRegistry::CreateOp( - op_desc, const_cast(&pdesc)); + for (auto& op_desc : block.AllOps()) { + auto op = paddle::framework::OpRegistry::CreateOp(*op_desc); op->Run(local_scope, *device); } diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h index 793ee954e2..c78bfe8f9f 100644 --- a/paddle/framework/executor.h +++ b/paddle/framework/executor.h @@ -14,8 +14,8 @@ limitations under the License. 
*/ #pragma once -#include "paddle/framework/framework.pb.h" #include "paddle/framework/op_info.h" +#include "paddle/framework/program_desc.h" #include "paddle/framework/scope.h" #include "paddle/framework/tensor.h" @@ -34,7 +34,7 @@ class Executor { * ProgramDesc * Scope */ - void Run(const ProgramDesc&, Scope*, int); + void Run(const ProgramDescBind&, Scope*, int); private: std::vector device_contexts_; diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index a4747e7c7c..0779137639 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -114,7 +114,12 @@ OpDescBind::OpDescBind(const OpDesc &desc, ProgramDescBind *prog) // restore attrs_ for (const OpDesc::Attr &attr : desc_.attrs()) { std::string attr_name = attr.name(); - attrs_[attr_name] = GetAttrValue(attr, prog->Proto()); + if (attr.type() != AttrType::BLOCK) { + attrs_[attr_name] = GetAttrValue(attr); + } else { + auto bid = attr.block_idx(); + attrs_[attr_name] = prog->MutableBlock(bid); + } } } @@ -188,8 +193,7 @@ void OpDescBind::SetAttr(const std::string &name, const Attribute &v) { } void OpDescBind::SetBlockAttr(const std::string &name, BlockDescBind &block) { - BlockDesc *desc = block.Proto(); - this->attrs_[name] = desc; + this->attrs_[name] = █ need_update_ = true; } @@ -208,7 +212,7 @@ Attribute OpDescBind::GetAttr(const std::string &name) const { int OpDescBind::GetBlockAttr(const std::string &name) const { auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name); - return boost::get(it->second)->idx(); + return boost::get(it->second)->ID(); } const std::unordered_map &OpDescBind::GetAttrMap() diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index c2f2438edf..8dedd873aa 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -43,13 +43,15 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap( return ret_val; } -std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc, - ProgramDesc* program) { +std::unique_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { + VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be" + "used in unit tests. 
Use CreateOp(const OpDescBind& op_desc) " + "instead."; VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); VariableNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); AttributeMap attrs; for (auto& attr : op_desc.attrs()) { - attrs[attr.name()] = GetAttrValue(attr, program); + attrs[attr.name()] = GetAttrValue(attr); } return CreateOp(op_desc.type(), inputs, outputs, attrs); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 19a9fc3802..2bb5e0e8ec 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -77,8 +77,7 @@ class OpRegistry { const VariableNameMap& outputs, AttributeMap attrs); - static std::unique_ptr CreateOp(const OpDesc& op_desc, - ProgramDesc* program); + static std::unique_ptr CreateOp(const OpDesc& op_desc); static std::unique_ptr CreateOp(const OpDescBind& op_desc); }; diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 6289125d7c..b860fe6cac 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -74,7 +74,7 @@ TEST(OpRegistry, CreateOp) { attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_f(scale); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); @@ -95,7 +95,7 @@ TEST(OpRegistry, IllegalAttr) { bool caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + paddle::framework::OpRegistry::CreateOp(op_desc); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "larger_than check fail"; @@ -115,7 +115,7 @@ TEST(OpRegistry, DefaultValue) { ASSERT_TRUE(op_desc.IsInitialized()); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); @@ -131,7 +131,7 @@ TEST(OpRegistry, CustomChecker) { // attr 'test_attr' is not set bool caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + paddle::framework::OpRegistry::CreateOp(op_desc); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "Attribute 'test_attr' is required!"; @@ -149,7 +149,7 @@ TEST(OpRegistry, CustomChecker) { attr->set_i(3); caught = false; try { - paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + paddle::framework::OpRegistry::CreateOp(op_desc); } catch (paddle::platform::EnforceNotMet err) { caught = true; std::string msg = "'test_attr' must be even!"; @@ -166,7 +166,7 @@ TEST(OpRegistry, CustomChecker) { attr->set_name("test_attr"); attr->set_type(paddle::framework::AttrType::INT); attr->set_i(4); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::platform::CPUDeviceContext dev_ctx; paddle::framework::Scope scope; op->Run(scope, dev_ctx); diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 3c07621293..42e0d52eed 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -83,7 +83,7 @@ TEST(OperatorBase, all) { paddle::platform::CPUDeviceContext device_context; paddle::framework::Scope scope; - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = 
paddle::framework::OpRegistry::CreateOp(op_desc); scope.Var("OUT1"); ASSERT_EQ(paddle::framework::op_run_num, 0); op->Run(scope, device_context); @@ -208,7 +208,7 @@ TEST(OpKernel, all) { paddle::platform::CPUDeviceContext cpu_device_context; paddle::framework::Scope scope; - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 0); op->Run(scope, cpu_device_context); ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1); @@ -244,7 +244,7 @@ TEST(OpKernel, multi_inputs) { scope.Var("y0")->GetMutable(); scope.Var("y1")->GetMutable(); - auto op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_device_context); } diff --git a/paddle/framework/program_desc.h b/paddle/framework/program_desc.h index ce1721472d..b1cb086de4 100644 --- a/paddle/framework/program_desc.h +++ b/paddle/framework/program_desc.h @@ -37,7 +37,9 @@ class ProgramDescBind { BlockDescBind *AppendBlock(const BlockDescBind &parent); - BlockDescBind *Block(size_t idx) { return blocks_[idx].get(); } + BlockDescBind *MutableBlock(size_t idx) { return blocks_[idx].get(); } + + const BlockDescBind &Block(size_t idx) const { return *blocks_[idx]; } size_t Size() const { return blocks_.size(); } diff --git a/paddle/framework/program_desc_test.cc b/paddle/framework/program_desc_test.cc index d28c2a0bff..83e7286e0e 100644 --- a/paddle/framework/program_desc_test.cc +++ b/paddle/framework/program_desc_test.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { TEST(ProgramDesc, copy_ctor) { ProgramDescBind program; - auto* global_block = program.Block(0); + auto* global_block = program.MutableBlock(0); auto* x = global_block->Var("X"); x->SetType(VarDesc_VarType_LOD_TENSOR); x->SetLoDLevel(0); @@ -44,7 +44,7 @@ TEST(ProgramDesc, copy_ctor) { ProgramDescBind program_copy(program); - auto* global_block_copy = program_copy.Block(0); + auto* global_block_copy = program_copy.MutableBlock(0); ASSERT_NE(global_block, global_block_copy); auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) { @@ -82,7 +82,7 @@ TEST(ProgramDesc, copy_ctor) { TEST(ProgramDescBind, serialize_and_deserialize) { ProgramDescBind program_origin; - auto* global_block = program_origin.Block(0); + auto* global_block = program_origin.MutableBlock(0); auto* x = global_block->Var("X"); x->SetType(VarDesc_VarType_LOD_TENSOR); x->SetLoDLevel(0); @@ -108,7 +108,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) { program_origin.Proto()->SerializeToString(&binary_str); ProgramDescBind program_restored(binary_str); - auto* global_block_restored = program_restored.Block(0); + auto* global_block_restored = program_restored.MutableBlock(0); ASSERT_NE(global_block, global_block_restored); auto assert_same_var = [&](const std::string& name, VarDescBind* var_before) { diff --git a/paddle/framework/prune_test.cc b/paddle/framework/prune_test.cc index cadd114fbc..5988874809 100644 --- a/paddle/framework/prune_test.cc +++ b/paddle/framework/prune_test.cc @@ -52,7 +52,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs, TEST(Prune, one_operator) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); @@ -69,7 +69,7 @@ TEST(Prune, one_operator) { TEST(Prune, forward) { 
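// Illustrative aside (hypothetical, not part of this commit): what these
// prune tests exercise. Prune walks the op list backwards from a target op
// and keeps only ops whose outputs are (transitively) consumed on the way
// to it. A toy stand-in over ops in program order; not the real Prune()
// signature:
#include <cassert>
#include <cstddef>
#include <set>
#include <string>
#include <vector>

struct Op {
  std::vector<std::string> ins, outs;
};

std::vector<bool> PruneToTarget(const std::vector<Op>& ops, std::size_t t) {
  std::vector<bool> keep(ops.size(), false);
  std::set<std::string> needed(ops[t].ins.begin(), ops[t].ins.end());
  keep[t] = true;
  for (std::size_t i = t; i-- > 0;) {  // walk towards the program start
    for (const auto& out : ops[i].outs) {
      if (needed.count(out)) {
        keep[i] = true;
        break;
      }
    }
    if (keep[i]) needed.insert(ops[i].ins.begin(), ops[i].ins.end());
  }
  return keep;
}

int main() {
  // a->b->c plus an unrelated x->y; the target is the b->c op.
  std::vector<Op> ops = {{{"a"}, {"b"}}, {{"x"}, {"y"}}, {{"b"}, {"c"}}};
  auto keep = PruneToTarget(ops, 2);
  assert(keep[0] && !keep[1] && keep[2]);
  return 0;
}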
f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a"}}}, {{"output", {"b"}}}, {}, block); AddOp("one_one", {{"input", {"b"}}}, {{"output", {"c"}}}, {}, block); @@ -88,7 +88,7 @@ TEST(Prune, forward) { TEST(Prune, multi_input_op) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); AddOp("one_one", {{"input", {"a0"}}}, {{"output", {"b0"}}}, {}, block); AddOp("one_one", {{"input", {"a1"}}}, {{"output", {"b1"}}}, {}, block); @@ -106,7 +106,7 @@ TEST(Prune, multi_input_op) { TEST(Prune, multi_output_op) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block); @@ -122,7 +122,7 @@ TEST(Prune, multi_output_op) { TEST(Prune, multi_target) { f::ProgramDescBind program; - f::BlockDescBind *block = program.Block(0); + f::BlockDescBind *block = program.MutableBlock(0); AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, {}, block); AddOp("one_one", {{"input", {"b"}}}, {{"output", {"b1"}}}, {}, block); diff --git a/paddle/framework/type_defs.h b/paddle/framework/type_defs.h index c38c4a8ae9..afeeb1914a 100644 --- a/paddle/framework/type_defs.h +++ b/paddle/framework/type_defs.h @@ -36,7 +36,7 @@ using VariableNameMap = std::map>; using Attribute = boost::variant, std::vector, std::vector, bool, - std::vector, BlockDesc*>; + std::vector, BlockDescBind*>; using AttributeMap = std::unordered_map; diff --git a/paddle/framework/var_type_inference_test.cc b/paddle/framework/var_type_inference_test.cc index 918de1fd05..9035e63fa4 100644 --- a/paddle/framework/var_type_inference_test.cc +++ b/paddle/framework/var_type_inference_test.cc @@ -63,41 +63,43 @@ namespace framework { TEST(InferVarType, sum_op) { ProgramDescBind prog; - auto *op = prog.Block(0)->AppendOp(); + auto *op = prog.MutableBlock(0)->AppendOp(); op->SetType("sum"); op->SetInput("X", {"test_a", "test_b", "test_c"}); op->SetOutput("Out", {"test_out"}); - prog.Block(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test_out"); + prog.MutableBlock(0)->Var("test_a")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_c")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_out"); - op->InferVarType(prog.Block(0)); + op->InferVarType(prog.MutableBlock(0)); - ASSERT_EQ(VarDesc::SELECTED_ROWS, prog.Block(0)->Var("test_out")->GetType()); + ASSERT_EQ(VarDesc::SELECTED_ROWS, + prog.MutableBlock(0)->Var("test_out")->GetType()); - prog.Block(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR); - op->InferVarType(prog.Block(0)); - ASSERT_EQ(VarDesc::LOD_TENSOR, prog.Block(0)->Var("test_out")->GetType()); + prog.MutableBlock(0)->Var("test_b")->SetType(VarDesc::LOD_TENSOR); + op->InferVarType(prog.MutableBlock(0)); + ASSERT_EQ(VarDesc::LOD_TENSOR, + prog.MutableBlock(0)->Var("test_out")->GetType()); } TEST(InferVarType, sum_op_without_infer_var_type) { ProgramDescBind prog; - auto *op = prog.Block(0)->AppendOp(); + auto *op = prog.MutableBlock(0)->AppendOp(); op->SetType("sum_without_infer_var_type"); 
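// Illustrative aside (hypothetical, not part of this commit): the rule the
// sum-op tests above verify. The output is SELECTED_ROWS only when every
// input is SELECTED_ROWS; a single dense LOD_TENSOR input makes the result
// dense. Self-contained sketch, not the framework's InferVarType API:
#include <algorithm>
#include <cassert>
#include <vector>

enum class VarType { LOD_TENSOR, SELECTED_ROWS };

VarType InferSumOutput(const std::vector<VarType>& inputs) {
  bool all_sparse =
      std::all_of(inputs.begin(), inputs.end(),
                  [](VarType t) { return t == VarType::SELECTED_ROWS; });
  return all_sparse ? VarType::SELECTED_ROWS : VarType::LOD_TENSOR;
}

int main() {
  using V = VarType;
  assert(InferSumOutput({V::SELECTED_ROWS, V::SELECTED_ROWS}) ==
         V::SELECTED_ROWS);
  assert(InferSumOutput({V::SELECTED_ROWS, V::LOD_TENSOR}) == V::LOD_TENSOR);
  return 0;
}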
op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); op->SetOutput("Out", {"test2_out"}); - prog.Block(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS); - prog.Block(0)->Var("test2_out"); + prog.MutableBlock(0)->Var("test2_a")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_b")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_c")->SetType(VarDesc::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test2_out"); - op->InferVarType(prog.Block(0)); + op->InferVarType(prog.MutableBlock(0)); ASSERT_EQ(VarDesc_VarType_LOD_TENSOR, - prog.Block(0)->Var("test2_out")->GetType()); + prog.MutableBlock(0)->Var("test2_out")->GetType()); } } // namespace framework diff --git a/paddle/operators/dynamic_recurrent_op_test.cc b/paddle/operators/dynamic_recurrent_op_test.cc index fff63efb24..8d840e259b 100644 --- a/paddle/operators/dynamic_recurrent_op_test.cc +++ b/paddle/operators/dynamic_recurrent_op_test.cc @@ -51,7 +51,7 @@ class RNNAlgorithmTestHelper : public ::testing::Test { CreateGlobalVariables(); auto op_desc = CreateOpDesc(); - op = paddle::framework::OpRegistry::CreateOp(op_desc, nullptr); + op = paddle::framework::OpRegistry::CreateOp(op_desc); dop = &(dynamic_cast(op.get())->rnn); InitCacheManually(); InitStepNet(); diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 14adfa1f35..dcae426c7e 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -129,7 +129,8 @@ void BindProgramDesc(py::module &m) { } return retv; }) - .def("block", &ProgramDescBind::Block, py::return_value_policy::reference) + .def("block", &ProgramDescBind::MutableBlock, + py::return_value_policy::reference) .def("num_blocks", &ProgramDescBind::Size) .def("serialize_to_string", [](ProgramDescBind &program_desc) -> py::bytes { diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 2a0075356e..881df6ad32 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -275,7 +275,7 @@ All parameter, weight, gradient are variables in Paddle. const std::vector> &targets) { ProgramDescBind prog_with_targets(origin); for (const auto &t : targets) { - prog_with_targets.Block(t[0])->Op(t[1])->MarkAsTarget(); + prog_with_targets.MutableBlock(t[0])->Op(t[1])->MarkAsTarget(); } ProgramDesc pruned_desc; Prune(*prog_with_targets.Proto(), &pruned_desc); @@ -335,7 +335,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - return OpRegistry::CreateOp(desc, nullptr); + return OpRegistry::CreateOp(desc); }) .def("backward", [](const OperatorBase &forwardOp, @@ -439,7 +439,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto rnn_op = OpRegistry::CreateOp(desc, nullptr); + auto rnn_op = OpRegistry::CreateOp(desc); return static_cast(rnn_op.release()); }) .def("set_stepnet", [](operators::RecurrentOp &self, @@ -457,7 +457,7 @@ All parameter, weight, gradient are variables in Paddle. 
PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto rnn_op = OpRegistry::CreateOp(desc, nullptr); + auto rnn_op = OpRegistry::CreateOp(desc); return static_cast( rnn_op.release()); }) @@ -484,7 +484,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_ENFORCE(desc.IsInitialized(), "User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); - auto cond_op = OpRegistry::CreateOp(desc, nullptr); + auto cond_op = OpRegistry::CreateOp(desc); return static_cast(cond_op.release()); }) .def("set_truenet", @@ -498,10 +498,7 @@ All parameter, weight, gradient are variables in Paddle. py::class_(m, "Executor") .def(py::init &>()) - .def("run", [](Executor &self, ProgramDescBind *program_bind, - Scope *scope, int block_id) { - self.Run(*program_bind->Proto(), scope, block_id); - }); + .def("run", &Executor::Run); m.def("unique_integer", UniqueIntegerGenerator); m.def("init_gflags", InitGflags); From 0318f47e1c26242df1b0ea0218a03a2ea5152535 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 31 Oct 2017 15:44:30 -0700 Subject: [PATCH 32/35] Enhance in backward (#5262) Set gradient's data type based on its forward variable --- paddle/framework/backward.cc | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 9759bb2cf9..dbd5a14f9f 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include "paddle/framework/block_desc.h" #include "paddle/framework/op_registry.h" @@ -285,6 +286,15 @@ static bool AllGradInSet(const std::vector& names, return true; } +static std::string FwdName(const std::string& grad_name) { + auto pos = grad_name.find("@GRAD"); + if (pos == std::string::npos) { + return ""; + } else { + return grad_name.substr(0, pos); + } +} + static void CreateGradVarInBlock( size_t grad_op_start_index, const std::unordered_map& param_name_map, @@ -294,6 +304,7 @@ static void CreateGradVarInBlock( for (size_t op_index = grad_op_start_index; op_index < ops.size(); ++op_index) { bool need_infer_shape = false; + std::unordered_set new_vars; ForEachVarName(ops[op_index]->Outputs(), [&](const std::string& grad_var_name) { if (block_desc->HasVar(grad_var_name)) { @@ -301,8 +312,7 @@ static void CreateGradVarInBlock( } need_infer_shape = true; auto var = block_desc->Var(grad_var_name); - // FIXME(qiao) infer the datatype - var->SetDataType(framework::DataType::FP32); + new_vars.insert(var->Name()); auto it = param_name_map.find(grad_var_name); if (it == param_name_map.end()) { return false; @@ -316,6 +326,21 @@ static void CreateGradVarInBlock( }); if (need_infer_shape) { ops[op_index]->InferVarType(block_desc); + for (auto& arg : ops[op_index]->OutputArgumentNames()) { + if (new_vars.find(arg) == new_vars.end()) { + continue; + } + auto pname = FwdName(arg); + auto* param = block_desc->FindVar(pname); + auto* grad = block_desc->FindVar(arg); + if (param == nullptr) { + LOG(WARNING) << "Cannot find forward variable of " << arg + << ". 
From bcdedecb5755df1b42e4fa822498224d6d1baccd Mon Sep 17 00:00:00 2001
From: Haonan
Date: Tue, 31 Oct 2017 16:23:13 -0700
Subject: [PATCH 33/35] handle non-sequence data in sequenceReshapeLayer (#5188)

---
 .../gserver/layers/SequenceReshapeLayer.cpp | 20 +++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/paddle/gserver/layers/SequenceReshapeLayer.cpp b/paddle/gserver/layers/SequenceReshapeLayer.cpp
index 433592953b..8229744072 100644
--- a/paddle/gserver/layers/SequenceReshapeLayer.cpp
+++ b/paddle/gserver/layers/SequenceReshapeLayer.cpp
@@ -70,11 +70,23 @@ void SequenceReshapeLayer::forward(PassType passType) {
   size_t outDim = getSize();

   size_t numSequences = input.getNumSequences();
-  auto startPositions = input.sequenceStartPositions->getVector(false);
-  const int* starts = startPositions->getData();
-
-  CHECK_EQ(starts[numSequences], input.getBatchSize());
-  CHECK_EQ(numSequences, startPositions->getSize() - 1);
+  // by default, treat each instance as a sequence of length one
+  IVectorPtr seqStarts;
+  IVector::resizeOrCreate(seqStarts, input.getBatchSize() + 1, false);
+  int* startsData = seqStarts->getData();
+  for (int i = 0; i < input.getBatchSize() + 1; i++) {
+    startsData[i] = i;
+  }
+  const int* starts = startsData;
+
+  // if sequence information is present, use the provided start positions
+  if (input.sequenceStartPositions) {
+    auto startPositions = input.sequenceStartPositions->getVector(false);
+    starts = startPositions->getData();
+
+    CHECK_EQ(starts[numSequences], input.getBatchSize());
+    CHECK_EQ(numSequences, startPositions->getSize() - 1);
+  }

   for (size_t seqID = 0; seqID < numSequences; seqID++) {
     size_t inNumIns = starts[seqID + 1] - starts[seqID];

From 26492210c02a32cfdb229a4b02ef606335a52ca8 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Tue, 31 Oct 2017 16:59:37 -0700
Subject: [PATCH 34/35] Fix/sequence op (#5264)

* "replace enum with string"
* "fix layers"
---
 paddle/operators/sequence_pool_op.cc     |  13 +-
 paddle/operators/sequence_pool_op.h      | 114 +++++++-----------
 python/paddle/v2/framework/layers.py     |  21 +---
 .../v2/framework/tests/test_seq_pool.py  |  33 ++---
 4 files changed, 68 insertions(+), 113 deletions(-)

diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/operators/sequence_pool_op.cc
index 6d600c2727..29d19df108 100644
--- a/paddle/operators/sequence_pool_op.cc
+++ b/paddle/operators/sequence_pool_op.cc
@@ -39,15 +39,14 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out",
               "(Tensor), output of SequencePoolOp, which does not contain LoD "
               "information.");
-    AddAttr<int>(
-        "strategy",
-        "(int, default AVERAGE) the pooling strategy of SequencePoolOp.")
-        .SetDefault(AVERAGE)
-        .InEnum({AVERAGE, SUM, SQRT, MAX, LAST, FIRST});
+    AddAttr<std::string>(
+        "pooltype",
+        "(string, default AVERAGE) the pooling type of SequencePoolOp.")
+        .SetDefault("AVERAGE");
     AddComment(R"DOC(
     SequencePoolOp pools features of all time-steps of each instance.

-    It supports six pooling strategy:
+    It supports six pooling types:
     - AVERAGE: Out[i] = average_{for each instance in i-th sequence}{X[i]}
     - SUM: Out[i] = sum_{for each instance in i-th sequence}{X[i]}
     - SQRT: Out[i] = sum_{for each instance in i-th sequence}{X[i]} / sqrt(i-th sequence length)
@@ -63,7 +62,7 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker {
     and the value of X = [[1, 3], [2, 4, 6], [5, 1]].
     Thus, Out is a [3,1,1] Tensor without LoD information.
-    And for different strategy, the value of Out is as follows:
+    And for different pooling types, the value of Out is as follows:

     - AVERAGE: [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2
     - SUM: [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1
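The DOC block above pins down the expected value for each pooling type on a concrete LoD example. A minimal NumPy sketch of the same six rules, using the sequence lengths 2, 3, and 2 and the values from that example; the helper name is illustrative, not part of the patch:

```python
import numpy as np

lod = [0, 2, 5, 7]  # sequence offsets for lengths 2, 3, 2
x = np.array([1., 3., 2., 4., 6., 5., 1.])  # flattened X from the DOC example

def seq_pool(x, lod, pooltype):
    out = []
    for start, end in zip(lod[:-1], lod[1:]):
        seg = x[start:end]
        if pooltype == "AVERAGE":
            out.append(seg.mean())
        elif pooltype == "SUM":
            out.append(seg.sum())
        elif pooltype == "SQRT":
            out.append(seg.sum() / np.sqrt(len(seg)))
        elif pooltype == "MAX":
            out.append(seg.max())
        elif pooltype == "LAST":
            out.append(seg[-1])
        elif pooltype == "FIRST":
            out.append(seg[0])
        else:
            raise ValueError("unsupported pooling type")
    return np.array(out)

print(seq_pool(x, lod, "AVERAGE"))  # [2. 4. 3.], as in the comment above
print(seq_pool(x, lod, "SUM"))      # [ 4. 12.  6.]
```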
diff --git a/paddle/operators/sequence_pool_op.h b/paddle/operators/sequence_pool_op.h
index 07bf61df45..e0e0493fe0 100644
--- a/paddle/operators/sequence_pool_op.h
+++ b/paddle/operators/sequence_pool_op.h
@@ -29,22 +29,13 @@
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

-enum SeqPoolType {
-  AVERAGE = 0,
-  SUM = 1,
-  SQRT = 2,  // square_root_n
-  MAX = 3,
-  LAST = 4,
-  FIRST = 5
-};
-
 template <typename Place, typename T>
 class SequencePoolKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in = context.Input<LoDTensor>("X");
     auto* out = context.Output<LoDTensor>("Out");
-    int strategy = context.Attr<int>("strategy");
+    std::string pooltype = context.Attr<std::string>("pooltype");

     auto dims = in->dims();
     auto lod = in->lod();
@@ -71,28 +62,21 @@ class SequencePoolKernel : public framework::OpKernel<T> {
       auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
       auto out_e = EigenVector<T>::Flatten(out_t);

-      switch (strategy) {
-        case AVERAGE:
-          out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
-          break;
-        case SUM:
-          out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}}));
-          break;
-        case SQRT:
-          out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}})) /
-                                std::sqrt(static_cast<T>(h));
-          break;
-        case MAX:
-          out_e.device(place) = in_e.maximum(Eigen::array<int, 1>({{0}}));
-          break;
-        case LAST:
-          out_e.device(place) = in_e.chip(h - 1, 0);
-          break;
-        case FIRST:
-          out_e.device(place) = in_e.chip(0, 0);
-          break;
-        default:
-          PADDLE_THROW("unsupported pooling strategy");
+      if (pooltype == "AVERAGE") {
+        out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
+      } else if (pooltype == "SUM") {
+        out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}}));
+      } else if (pooltype == "SQRT") {
+        out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}})) /
+                              std::sqrt(static_cast<T>(h));
+      } else if (pooltype == "MAX") {
+        out_e.device(place) = in_e.maximum(Eigen::array<int, 1>({{0}}));
+      } else if (pooltype == "LAST") {
+        out_e.device(place) = in_e.chip(h - 1, 0);
+      } else if (pooltype == "FIRST") {
+        out_e.device(place) = in_e.chip(0, 0);
+      } else {
+        PADDLE_THROW("unsupported pooling type");
       }
     }
   }
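Most pooling types broadcast the output gradient uniformly over the sequence, but the backward hunk below must route the MAX-pool gradient only to the row that won the forward maximum, one column at a time. A NumPy sketch of that scatter rule, with an illustrative helper name that is not part of the patch:

```python
import numpy as np

def max_pool_grad(in_seq, out_grad):
    # in_seq: one sequence of shape (h, w); out_grad: gradient of shape (w,).
    in_grad = np.zeros_like(in_seq)
    for col in range(in_seq.shape[1]):
        row = np.argmax(in_seq[:, col])    # maxCoeff(&row_id) in the Eigen code
        in_grad[row, col] = out_grad[col]  # gradient flows only to the argmax row
    return in_grad

in_seq = np.array([[1., 6.],
                   [5., 2.]])
print(max_pool_grad(in_seq, np.array([1., 1.])))
# [[0. 1.]
#  [1. 0.]]
```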
@@ -105,15 +89,15 @@ class SequencePoolGradKernel : public framework::OpKernel<T> {
     auto* in = context.Input<LoDTensor>("X");
     auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
     auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
-    int strategy = context.Attr<int>("strategy");
+    std::string pooltype = context.Attr<std::string>("pooltype");

     auto dims = in->dims();
     auto lod = in->lod()[0];
     int64_t w = in->numel() / dims[0];

     in_g->mutable_data<T>(context.GetPlace());
-    if (strategy == LAST || strategy == FIRST) {
-      // set X@Grad be zero at first when strategy is LAST/FIRST
+    if (pooltype == "LAST" || pooltype == "FIRST") {
+      // set X@Grad to zero first when pooltype is LAST/FIRST
       math::SetConstant<Place, T> functor;
       functor(context.device_context(), in_g, 0);
     }
@@ -127,41 +111,33 @@ class SequencePoolGradKernel : public framework::OpKernel<T> {
       auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
       Eigen::DSizes<int, 2> bcast(h, 1);

-      switch (strategy) {
-        case AVERAGE:
-          in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
-          break;
-        case SUM:
-          in_g_e.device(place) = (out_g_e).broadcast(bcast);
-          break;
-        case SQRT:
-          in_g_e.device(place) =
-              (out_g_e / std::sqrt(static_cast<T>(h))).broadcast(bcast);
-          break;
-        case MAX: {
-          auto in_t =
-              in->Slice(static_cast<int>(lod[i]), static_cast<int>(lod[i + 1]));
-          Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
-              in_t_map(in_t.data<T>(), h, w);
-          int row_id;
-          Eigen::array<int, 2> extents{{1, 1}};
-          for (int col_id = 0; col_id < w; col_id++) {
-            in_t_map.col(col_id).maxCoeff(&row_id);
-            Eigen::array<int, 2> in_offsets{{row_id, col_id}};
-            Eigen::array<int, 2> out_offsets{{0, col_id}};
-            in_g_e.slice(in_offsets, extents).device(place) =
-                out_g_e.slice(out_offsets, extents);
-          }
-          break;
-        }
-        case LAST:
-          in_g_e.chip(h - 1, 0).device(place) = out_g_e;
-          break;
-        case FIRST:
-          in_g_e.chip(0, 0).device(place) = out_g_e;
-          break;
-        default:
-          PADDLE_THROW("unsupported pooling strategy");
+      if (pooltype == "AVERAGE") {
+        in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
+      } else if (pooltype == "SUM") {
+        in_g_e.device(place) = (out_g_e).broadcast(bcast);
+      } else if (pooltype == "SQRT") {
+        in_g_e.device(place) =
+            (out_g_e / std::sqrt(static_cast<T>(h))).broadcast(bcast);
+      } else if (pooltype == "MAX") {
+        auto in_t =
+            in->Slice(static_cast<int>(lod[i]), static_cast<int>(lod[i + 1]));
+        Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
+            in_t_map(in_t.data<T>(), h, w);
+        int row_id;
+        Eigen::array<int, 2> extents{{1, 1}};
+        for (int col_id = 0; col_id < w; col_id++) {
+          in_t_map.col(col_id).maxCoeff(&row_id);
+          Eigen::array<int, 2> in_offsets{{row_id, col_id}};
+          Eigen::array<int, 2> out_offsets{{0, col_id}};
+          in_g_e.slice(in_offsets, extents).device(place) =
+              out_g_e.slice(out_offsets, extents);
+        }
+      } else if (pooltype == "LAST") {
+        in_g_e.chip(h - 1, 0).device(place) = out_g_e;
+      } else if (pooltype == "FIRST") {
+        in_g_e.chip(0, 0).device(place) = out_g_e;
+      } else {
+        PADDLE_THROW("unsupported pooling type");
       }
     }
   }
diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py
index dab72f0195..86a2c7bf08 100644
--- a/python/paddle/v2/framework/layers.py
+++ b/python/paddle/v2/framework/layers.py
@@ -351,32 +351,21 @@ def conv2d(input,
     return helper.append_activation(pre_act)


-def sequence_pool(input, pool_type, program=None, init_program=None):
-    # FIXME(dzh) : want to unify the argument of python layer
-    # function. So we ignore some unecessary attributes
-
-    ENUM_POOL_TYPE = dict({
-        "AVERAGE": 0,
-        "SUM": 1,
-        "SQRT": 2,
-        "MAX": 3,
-        "LAST": 4,
-        "FIRST": 5
-    })
+def sequence_pool(input, pool_type, **kwargs):
+    ENUM_POOL_TYPE = set(["AVERAGE", "SUM", "SQRT", "MAX", "LAST", "FIRST"])
     if pool_type.upper() not in ENUM_POOL_TYPE:
         raise ValueError("Unknown pool_type: '%s'. 
It can only be %s.", - str(pool_type), " ".join(ENUM_POOL_TYPE.keys())) + str(pool_type), " ".join(ENUM_POOL_TYPE)) - helper = LayerHelper('sequence_pool', **locals()) + helper = LayerHelper('sequence_pool', **kwargs) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) - # FIXME(dzh): strategy helper.append_op( type="sequence_pool", inputs={"X": [input]}, outputs={"Out": [pool_out]}, - attrs={"strategy": ENUM_POOL_TYPE[pool_type.upper()]}) + attrs={"pooltype": pool_type.upper()}) return pool_out diff --git a/python/paddle/v2/framework/tests/test_seq_pool.py b/python/paddle/v2/framework/tests/test_seq_pool.py index 56602c57e6..efc4920124 100644 --- a/python/paddle/v2/framework/tests/test_seq_pool.py +++ b/python/paddle/v2/framework/tests/test_seq_pool.py @@ -3,15 +3,6 @@ import numpy as np from op_test import OpTest -class SeqPoolType(OpTest): - AVERAGE = 0 - SUM = 1 - SQRT = 2 - MAX = 3 - LAST = 4 - FIRST = 5 - - class TestSeqAvgPool(OpTest): def set_data(self): self.op_type = 'sequence_pool' @@ -25,7 +16,7 @@ class TestSeqAvgPool(OpTest): return x, lod, out def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.AVERAGE} + self.attrs = {'pooltype': "AVERAGE"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = sub_x.mean(axis=0) @@ -54,7 +45,7 @@ class TestSeqAvgPool2D(TestSeqAvgPool): return x, lod, out def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.AVERAGE} + self.attrs = {'pooltype': "AVERAGE"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) out[i] = np.reshape(sub_x.mean(axis=0), (3, 17)) @@ -62,7 +53,7 @@ class TestSeqAvgPool2D(TestSeqAvgPool): class TestSeqSumPool(TestSeqAvgPool): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.SUM} + self.attrs = {'pooltype': "SUM"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = sub_x.sum(axis=0) @@ -70,7 +61,7 @@ class TestSeqSumPool(TestSeqAvgPool): class TestSeqSumPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.SUM} + self.attrs = {'pooltype': "SUM"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) out[i] = np.reshape(sub_x.sum(axis=0), (3, 17)) @@ -78,7 +69,7 @@ class TestSeqSumPool2D(TestSeqAvgPool2D): class TestSeqSqrtPool(TestSeqAvgPool): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.SQRT} + self.attrs = {'pooltype': "SQRT"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] len = lod[0][i + 1] - lod[0][i] @@ -87,7 +78,7 @@ class TestSeqSqrtPool(TestSeqAvgPool): class TestSeqSqrtPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.SQRT} + self.attrs = {'pooltype': "SQRT"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) len = lod[0][i + 1] - lod[0][i] @@ -99,7 +90,7 @@ class TestSeqSqrtPool2D(TestSeqAvgPool2D): class TestSeqMaxPool(TestSeqAvgPool): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.MAX} + self.attrs = {'pooltype': "MAX"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = np.amax(sub_x, axis=0) @@ -111,7 +102,7 @@ class TestSeqMaxPool(TestSeqAvgPool): class TestSeqMaxPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.MAX} + self.attrs = {'pooltype': "MAX"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 17)) @@ -123,7 
+114,7 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D): class TestSeqLastPool(TestSeqAvgPool): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.LAST} + self.attrs = {'pooltype': "LAST"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = sub_x[-1, :] @@ -131,7 +122,7 @@ class TestSeqLastPool(TestSeqAvgPool): class TestSeqLastPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.LAST} + self.attrs = {'pooltype': "LAST"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) out[i] = np.reshape(sub_x[-1, :], (3, 17)) @@ -139,7 +130,7 @@ class TestSeqLastPool2D(TestSeqAvgPool2D): class TestSeqFirstPool(TestSeqAvgPool): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.FIRST} + self.attrs = {'pooltype': "FIRST"} for i in range(4): sub_x = x[lod[0][i]:lod[0][i + 1], :] out[i] = sub_x[0, :] @@ -147,7 +138,7 @@ class TestSeqFirstPool(TestSeqAvgPool): class TestSeqFirstPool2D(TestSeqAvgPool2D): def compute(self, x, lod, out): - self.attrs = {'strategy': SeqPoolType.FIRST} + self.attrs = {'pooltype': "FIRST"} for i in range(4): sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) out[i] = np.reshape(sub_x[0, :], (3, 17)) From d3b07a6ede4083baef2795a70f6952d222f09244 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Wed, 1 Nov 2017 10:11:15 +0800 Subject: [PATCH 35/35] Add documentation of cross-compiling for iOS (#5239) * Add documentation of cross-compiling for iOS. * Correst the typo in documentation of cross-compiling for raspberry pi. * Set ANDROID_API to 21 when it is specified < 21 for arm64-v8a in build_android.sh. * Check the input and print the usage in MergeModel.cpp. --- .../cross_compiling_for_ios_cn.md | 99 +++++++++++++++++++ .../cross_compiling_for_raspberry_cn.md | 2 +- .../cross_compiling_for_raspberry_en.md | 2 +- paddle/scripts/docker/build_android.sh | 4 + paddle/trainer/MergeModel.cpp | 7 ++ 5 files changed, 112 insertions(+), 2 deletions(-) create mode 100644 doc/howto/cross_compiling/cross_compiling_for_ios_cn.md diff --git a/doc/howto/cross_compiling/cross_compiling_for_ios_cn.md b/doc/howto/cross_compiling/cross_compiling_for_ios_cn.md new file mode 100644 index 0000000000..32c490d9aa --- /dev/null +++ b/doc/howto/cross_compiling/cross_compiling_for_ios_cn.md @@ -0,0 +1,99 @@ +# 构建iOS平台上的PaddlePaddle库 +交叉编译iOS平台上适用的PaddlePaddle库,需要在MacOS系统上进行。本文的将介绍在MacOS上,从源码交叉编译iOS平台上适用的PaddlePaddle库。 + +## 准备交叉编译环境 +Apple官方为iOS开发提供了完整的交叉编译工具和集成开发环境,用户从App Store下载安装Xcode即可。也可自行前往官网下载,[Xcode](https://developer.apple.com/cn/xcode/)。安装完成之后,可在命令行执行`xcodebuild -version`,判断是否安装成功。 + +```bash +$ xcodebuild -version +Xcode 9.0 +Build version 9A235 +``` + +## 配置交叉编译参数 + +PaddlePaddle为交叉编译提供了工具链配置文档[cmake/cross_compiling/ios.cmake](https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/cross_compiling/ios.cmake),以提供一些默认的编译器和编译参数配置。 + +交叉编译iOS版本的PaddlePaddle库时,有一些必须配置的参数: + +- `CMAKE_SYSTEM_NAME`,CMake编译的目标平台,必须设置为`iOS`。在设置`CMAKE_SYSTEM_NAME=iOS`后,PaddlePaddle的CMake系统会自动编译所有的第三方依赖库,并且强制设置一些PaddlePaddle参数的值(`WITH_C_API=ON`、`WITH_GPU=OFF`、`WITH_AVX=OFF`、`WITH_PYTHON=OFF`、`WITH_RDMA=OFF`)。 +- `WITH_C_API`,是否编译C-API预测库,必须设置为ON。在iOS平台上只支持使用C-API来预测。 +- `WITH_SWIG_PY`,必须设置为ON。在iOS平台上不支持通过swig调用来训练或者预测。 + +iOS平台可选配置参数: + +- `IOS_PLATFORM`,可设置为`OS/SIMULATOR`,默认值为`OS`。 + - `OS`,构建目标为`arm`架构的iPhone或者iPad等物理设备。 + - `SIMULATOR`,构建目标为`x86`架构的模拟器平台。 +- `IOS_ARCH`,目标架构。针对不同的`IOS_PLATFORM`,可设置的目标架构如下表所示: + + | IOS_PLATFORM | IOS_ARCH | + |--------------|----------------------| + 
| OS | armv7, armv7s, arm64 (默认) | + | SIMULATOR | i386, x86_64 (默认) | + +- `IOS_DEPLOYMENT_TARGET`,最小的iOS部署版本,默认值为`7.0`。 +- `IOS_ENABLE_BITCODE`,是否使能[Bitcode](https://developer.apple.com/library/content/documentation/IDEs/Conceptual/AppDistributionGuide/AppThinning/AppThinning.html#//apple_ref/doc/uid/TP40012582-CH35-SW3),可设置`ON/OFF`,默认值为`ON`。 +- `IOS_USE_VECLIB_FOR_BLAS`,是否使用[vecLib](https://developer.apple.com/documentation/accelerate/veclib)框架进行BLAS矩阵计算,可设置`ON/OFF`,默认值为`OFF`。 +- `IOS_DEVELOPMENT_ROOT`,`Developer`目录,可显式指定为`/path/to/platform/Developer`。若未显式指定,PaddlePaddle将会根据`IOS_PLATFORM`自动选择`Xcode`对应`platform`的`Developer`目录。 +- `IOS_SDK_ROOT`,所使用`SDK`的根目录,可显式指定为`/path/to/platform/Developer/SDKs/SDK`。若未显式指定,PaddlePaddle将会自动选择`IOS_DEVELOPMENT_ROOT`目录下最新的`SDK`版本。 + +其他配置参数: + +- `USE_EIGEN_FOR_BLAS`,是否使用Eigen库进行矩阵计算,在`IOS_USE_VECLIB_FOR_BLAS=OFF`时有效。可设置`ON/OFF`,默认值为`OFF`。 +- `HOST_C/CXX_COMPILER`,宿主机的C/C++编译器。默认值为环境变量`CC/CXX`的值;若环境变量`CC/CXX`未设置,则使用`cc/c++`编译器。 + +常用的cmake配置如下: + +```bash +cmake -DCMAKE_SYSTEM_NAME=iOS \ + -DIOS_PLATFORM=OS \ + -DIOS_ARCH="arm64" \ + -DIOS_ENABLE_BITCODE=ON \ + -DIOS_USE_VECLIB_FOR_BLAS=ON \ + -DCMAKE_INSTALL_PREFIX=your/path/to/install \ + -DWITH_C_API=ON \ + -DWITH_TESTING=OFF \ + -DWITH_SWIG_PY=OFF \ + .. +``` + +```bash +cmake -DCMAKE_SYSTEM_NAME=iOS \ + -DIOS_PLATFORM=SIMULATOR \ + -DIOS_ARCH="x86_64" \ + -DIOS_USE_VECLIB_FOR_BLAS=ON \ + -DCMAKE_INSTALL_PREFIX=your/path/to/install \ + -DWITH_C_API=ON \ + -DWITH_TESTING=OFF \ + -DWITH_SWIG_PY=OFF \ + .. +``` + +用户还可根据自己的需求设置其他编译参数。比如希望最小化生成库的大小,可以设置`CMAKE_BUILD_TYPE`为`MinSizeRel`;若希望得到最快的执行速度,则可设置`CMAKE_BUILD_TYPE`为`Release`。亦可以通过手动设置`CMAKE_C/CXX_FLAGS`来影响PaddlePaddle的编译过程。 + +**性能TIPS**,为了达到最快的计算速度,在CMake参数配置上,有以下建议: + +- 设置`CMAKE_BUILD_TYPE`为`Release` +- 设置`IOS_USE_VECLIB_FOR_BLAS=ON`,调用`vecLib`框架提供的BLAS函数进行矩阵计算。 + +## 编译和安装 + +CMake配置完成后,执行以下命令,PaddlePaddle将自动下载和编译所有第三方依赖库、编译和安装PaddlePaddle预测库。 + +``` +$ make +$ make install +``` + +注意:如果你曾在源码目录下编译过其他平台的PaddlePaddle库,请先使用`rm -rf`命令删除`third_party`目录和`build`目录,以确保所有的第三方依赖库和PaddlePaddle代码都是针对新的CMake配置重新编译的。 + +执行完安装命令后,`your/path/to/install`目录中会包含以下内容: + +- `include`目录,其中包含所有C-API的头文件 +- `lib`目录,其中包含PaddlePaddle的C-API静态库 +- `third_party`目录,其中包含所依赖的所有第三方库 + +注意,不同架构的PaddlePaddle库建议安装到不同的目录下,然后使用`lipo`工具将多个静态库合并成一个支持多个架构的fat库。 + +自此,PaddlePaddle库已经安装完成,用户可将合成的fat库用于深度学习相关的iOS App中,调用方法见C-API文档。 diff --git a/doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md b/doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md index 026c0c6f3b..6e983645fa 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md +++ b/doc/howto/cross_compiling/cross_compiling_for_raspberry_cn.md @@ -59,4 +59,4 @@ make install 注意:如果你曾经在源码目录下编译过其他平台的PaddlePaddle库,请先使用`rm -rf`命令删除`third_party`目录和`build`目录,以确保所有的第三方依赖库和PaddlePaddle代码都是针对新的CMake配置重新编译的。 -执行完安装命令后,,`your/path/to/install`目录中会包含`include`和`lib`目录,其中`include`中包含C-API的头文件,`lib`中包含一个Raspberry Pi版本的库。 +执行完安装命令后,`your/path/to/install`目录中会包含`include`和`lib`目录,其中`include`中包含C-API的头文件,`lib`中包含一个Raspberry Pi版本的库。 diff --git a/doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md b/doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md index 09ac4733ec..3c1a5950ff 100644 --- a/doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md +++ b/doc/howto/cross_compiling/cross_compiling_for_raspberry_en.md @@ -44,7 +44,7 @@ cmake -DCMAKE_SYSTEM_NAME=RPi \ .. ``` -To build the inference library, please set the argument WITH_API to ON: `WITH_C_API=ON`. 
+To build the inference library, please set the argument WITH\_C\_API to ON: `WITH_C_API=ON`.

 You can add more arguments. For example, to minimize the size of the generated inference library, you may use `CMAKE_BUILD_TYPE=MinSizeRel`. For performance optimization, you may use `CMAKE_BUILD_TYPE=Release`.
diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh
index 11612ad4be..6ef45d33d8 100644
--- a/paddle/scripts/docker/build_android.sh
+++ b/paddle/scripts/docker/build_android.sh
@@ -4,6 +4,10 @@ set -xe

 if [ $ANDROID_ABI == "arm64-v8a" ]; then
   ANDROID_ARCH=arm64
+  if [ $ANDROID_API -lt 21 ]; then
+    echo "Warning: arm64-v8a requires ANDROID_API >= 21."
+    ANDROID_API=21
+  fi
 else # armeabi, armeabi-v7a
   ANDROID_ARCH=arm
 fi
diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp
index a70673ffec..f3cfd9f97f 100644
--- a/paddle/trainer/MergeModel.cpp
+++ b/paddle/trainer/MergeModel.cpp
@@ -27,6 +27,13 @@
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT

 int main(int argc, char** argv) {
+  if (FLAGS_model_dir.empty() || FLAGS_config_file.empty() ||
+      FLAGS_model_file.empty()) {
+    LOG(INFO) << "Usage: ./paddle_merge_model --model_dir=pass-00000 "
+                 "--config_file=config.py --model_file=out.paddle";
+    return 0;
+  }
+
   initMain(argc, argv);
   initPython(argc, argv);