From a4df3f5bd8917b2cb510b23dc63bc97a20108f23 Mon Sep 17 00:00:00 2001 From: yangyaming Date: Wed, 30 Aug 2017 22:21:53 +0800 Subject: [PATCH 1/6] Finish framework of squared_l2_distance_op. --- paddle/operators/CMakeLists.txt | 2 + paddle/operators/squared_l2_distance_op.cc | 82 ++++++++++++++++++ paddle/operators/squared_l2_distance_op.cu | 25 ++++++ paddle/operators/squared_l2_distance_op.h | 84 +++++++++++++++++++ paddle/pybind/CMakeLists.txt | 3 +- paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../paddle/v2/framework/tests/op_test_util.py | 10 +-- .../tests/test_squared_l2_distance_op.py | 25 ++++++ 9 files changed, 227 insertions(+), 6 deletions(-) create mode 100644 paddle/operators/squared_l2_distance_op.cc create mode 100644 paddle/operators/squared_l2_distance_op.cu create mode 100644 paddle/operators/squared_l2_distance_op.h create mode 100644 python/paddle/v2/framework/tests/test_squared_l2_distance_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f0fd12f1b5..1c32d1df4a 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -73,3 +73,5 @@ op_library(uniform_random_op SRCS uniform_random_op.cc uniform_random_op.cu) op_library(lookup_table_op SRCS lookup_table_op.cc lookup_table_op.cu) op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS scale_op) + +op_library(squared_l2_distance_op SRCS squared_l2_distance_op.cc squared_l2_distance_op.cu) diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc new file mode 100644 index 0000000000..9fc498d5a5 --- /dev/null +++ b/paddle/operators/squared_l2_distance_op.cc @@ -0,0 +1,82 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/squared_l2_distance_op.h" + +namespace paddle { +namespace operators { + +class SquaredL2DistanceOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input of SquaredL2DistanceOp " + "must be initialized."); + PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), + ctx.Input("Y")->dims(), + "Dimensions of SquaredL2DistanceOp's two inputs " + "must be same.") + framework::DDim dims = ctx.Input("X")->dims(); + ctx.Output("sub_result")->Resize(dims); + ctx.Output("Out")->Resize(framework::make_ddim({dims[0], 1})); + } +}; + +class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { + public: + SquaredL2DistanceOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input value."); + AddInput("Y", "Target value."); + AddOutput("sub_result", + "Buffering substraction result which " + "will be reused in backward.") + .AsIntermediate(); + AddOutput("Out", "Squared l2 distance between input and target."); + AddComment(R"DOC( + SquaredL2DistanceOp will cacluate the squared L2 distances for + input and target. Number of distance value equals to the + first dimension of input. + )DOC"); + } +}; + +class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + ctx.Output(framework::GradVarName("X")) + ->Resize(ctx.Input("X")->dims()); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(squared_l2_distance, ops::SquaredL2DistanceOp, + ops::SquaredL2DistanceOpMaker, squared_l2_distance_grad, + ops::SquaredL2DistanceGradOp); +REGISTER_OP_CPU_KERNEL( + squared_l2_distance, + ops::SquaredL2DistanceKernel); +REGISTER_OP_CPU_KERNEL( + squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel); diff --git a/paddle/operators/squared_l2_distance_op.cu b/paddle/operators/squared_l2_distance_op.cu new file mode 100644 index 0000000000..3fe62f1a9c --- /dev/null +++ b/paddle/operators/squared_l2_distance_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#define EIGEN_USE_GPU + +#include "paddle/operators/squared_l2_distance_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL( + squared_l2_distance, + ops::SquaredL2DistanceKernel); +REGISTER_OP_GPU_KERNEL( + squared_l2_distance_grad, + ops::SquaredL2DistanceGradKernel); diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h new file mode 100644 index 0000000000..b350fd0117 --- /dev/null +++ b/paddle/operators/squared_l2_distance_op.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenMatrix = framework::EigenMatrix; +template +using EigenVector = framework::EigenVector; + +template +class SquaredL2DistanceKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* input0 = context.Input("X"); + auto* input1 = context.Input("Y"); + auto* output0 = context.Output("sub_result"); + auto* output1 = context.Output("Out"); + + output0->mutable_data(context.GetPlace()); + output1->mutable_data(context.GetPlace()); + + auto X = EigenMatrix::From(*input0); + auto Y = EigenMatrix::From(*input1); + auto subResult = EigenMatrix::From(*output0); + auto Z = EigenMatrix::From(*output1); + + auto place = context.GetEigenDevice(); + // buffer the substraction result + subResult.device(place) = X - Y; + const auto& inDims = X.dimensions(); + const auto& subResMat = subResult.reshape(Eigen::array( + {static_cast(inDims[0]), static_cast(X.size() / inDims[0])})); + Z.device(place) = subResMat.pow(2).sum(Eigen::array({1})); + } +}; + +template +class SquaredL2DistanceGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* input0 = context.Input("sub_result"); + auto* OG = context.Input(framework::GradVarName("Out")); + auto* IG = context.Output(framework::GradVarName("X")); + + IG->mutable_data(context.GetPlace()); + + auto subResult = EigenMatrix::From(*input0); + auto outGrad = EigenMatrix::From(*OG); + auto inGrad = EigenMatrix::From(*IG); + + const auto& subResDims = subResult.dimensions(); + int firstDim = static_cast(subResDims[0]); + int cols = subResult.size() / firstDim; + const auto subResMat = + subResult.reshape(Eigen::array({firstDim, cols})); + // create a matrix view for input gradient tensor + auto inGradMat = inGrad.reshape(Eigen::array({firstDim, cols})); + inGradMat.device(context.GetEigenDevice()) = + 2 * (outGrad.broadcast(Eigen::array({1, cols}))) * subResMat; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index 37e186a408..df8c2b37cf 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -18,5 +18,6 @@ 
cc_library(paddle_pybind SHARED fill_zeros_like_op lookup_table_op scale_op - minus_op) + minus_op + squared_l2_distance_op) endif(WITH_PYTHON) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 3bc150ccb7..69a5f98a43 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -48,6 +48,7 @@ USE_OP_ITSELF(identity); USE_OP(minus); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); +USE_OP(squared_l2_distance); namespace paddle { namespace framework { diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 661ebd8964..06ff1f4a0c 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -32,3 +32,4 @@ py_test(test_gradient_checker SRCS test_gradient_checker.py) py_test(test_lookup_table SRCS test_lookup_table.py) py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) py_test(mnist SRCS mnist.py) +py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index 3bc05a0fec..370f27eaf6 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -6,13 +6,13 @@ from paddle.v2.framework.op import Operator class OpTestMeta(type): """ Operator Test ClassMeta. - - It injects `test_all` method into user's OperatorTest class, to make Python + + It injects `test_all` method into user's OperatorTest class, to make Python unittest module run that method. - + The `test_all` read what value is stored in `self`. It use self's values to create and run a operator, and check whether that op is OK or not. - + See `test_add_two_op` for example usage. """ @@ -66,7 +66,7 @@ class OpTestMeta(type): self.assertTrue( numpy.allclose( actual, expect, atol=1e-05), - "output name: " + out_name + "has diff") + "output name: " + out_name + " has diff") obj.test_all = test_all return obj diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py new file mode 100644 index 0000000000..eeddb5a3bf --- /dev/null +++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py @@ -0,0 +1,25 @@ +import unittest +from op_test_util import OpTestMeta +from gradient_checker import GradientChecker, create_op +import numpy as np + + +class TestSquaredL2DistanceOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = 'squared_l2_distance' + self.inputs = { + 'X': np.random.uniform(0.1, 1., (2, 3)).astype('float32'), + 'Y': np.random.uniform(0.1, 1., (2, 3)).astype('float32') + } + subRes = self.inputs['X'] - self.inputs['Y'] + output = subRes * subRes + self.outputs = { + 'sub_result': subRes, + 'Out': np.expand_dims(output.sum(1), 1) + } + + +if __name__ == '__main__': + unittest.main() From f8b885f27f19474124d46002d6572c239910eefd Mon Sep 17 00:00:00 2001 From: yangyaming Date: Thu, 31 Aug 2017 20:15:48 +0800 Subject: [PATCH 2/6] Using EigenTensor to reshape tensor. 
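
This patch lets the target's first dimension be either equal to the
input's or 1; in the latter case the target is broadcast across the
input's first dimension before subtraction. A minimal NumPy sketch of
the intended forward semantics (squared_l2_distance_ref is a
hypothetical reference helper mirroring the Python unit test, not a
Paddle API):

    import numpy as np

    def squared_l2_distance_ref(x, y):
        # x: (N, d1, ..., dk); y: same shape or (1, d1, ..., dk).
        # NumPy broadcasts the leading 1 automatically, matching the
        # kernel's Y.broadcast(...) branch.
        sub_result = x - y
        flat = sub_result.reshape(x.shape[0], -1)
        # one squared distance per row of the input
        out = (flat ** 2).sum(axis=1, keepdims=True)
        return sub_result, out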
--- paddle/operators/squared_l2_distance_op.cc | 64 ++++++++--- paddle/operators/squared_l2_distance_op.h | 128 ++++++++++++++++++--- 2 files changed, 157 insertions(+), 35 deletions(-) diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 9fc498d5a5..3049f0f8ba 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -22,36 +22,52 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override { + void InferShape(const framework::InferShapeContext& ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input of SquaredL2DistanceOp " "must be initialized."); - PADDLE_ENFORCE_EQ(ctx.Input("X")->dims(), - ctx.Input("Y")->dims(), - "Dimensions of SquaredL2DistanceOp's two inputs " - "must be same.") - framework::DDim dims = ctx.Input("X")->dims(); - ctx.Output("sub_result")->Resize(dims); - ctx.Output("Out")->Resize(framework::make_ddim({dims[0], 1})); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), + "Target of SquaredL2DistanceOp " + "must be initialized."); + + auto* X = ctx.Input("X"); + auto xDims = X->dims(); + auto* Y = ctx.Input("Y"); + auto yDims = Y->dims(); + + PADDLE_ENFORCE_EQ(framework::arity(xDims), framework::arity(yDims), + "Tensor rank of both SquaredL2DistanceOp's " + "inputs must be same."); + int rank = framework::arity(xDims); + PADDLE_ENFORCE(rank >= 2 || rank <= 6, "Tensor rank should be in [2, 6]."); + PADDLE_ENFORCE(yDims[0] == 1 || yDims[0] == xDims[0], + "First dimension of target must be equal to input " + "or to 1."); + + ctx.Output("sub_result")->Resize(xDims); + ctx.Output("Out")->Resize({xDims[0], 1}); } }; class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2DistanceOpMaker(framework::OpProto *proto, - framework::OpAttrChecker *op_checker) + SquaredL2DistanceOpMaker(framework::OpProto* proto, + framework::OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input value."); - AddInput("Y", "Target value."); + AddInput("X", "Input of SquaredL2DistanceOp."); + AddInput("Y", "Target of SquaredL2DistanceOp."); AddOutput("sub_result", "Buffering substraction result which " "will be reused in backward.") .AsIntermediate(); AddOutput("Out", "Squared l2 distance between input and target."); AddComment(R"DOC( - SquaredL2DistanceOp will cacluate the squared L2 distances for + SquaredL2DistanceOp will cacluate the squared L2 distance for input and target. Number of distance value equals to the - first dimension of input. + first dimension of input. First dimension of target could be equal to + input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp + will broadcast the first dimension to the first dimension of input. + You can decide whether calculate the gradient of target. 
)DOC"); } }; @@ -61,9 +77,23 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output(framework::GradVarName("X")) - ->Resize(ctx.Input("X")->dims()); + void InferShape(const framework::InferShapeContext& ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Gradient of Out should not be null"); + // check out grad dimensions + auto outDims = ctx.Input(framework::GradVarName("Out"))->dims(); + auto xDims = ctx.Input("X")->dims(); + auto yDims = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_EQ(outDims[0], xDims[0], + "First dimension of output gradient and " + "input value must be equal."); + PADDLE_ENFORCE_EQ(outDims[1], 1, + "Second dimension of output gradient " + "must be 1."); + auto* xGrad = ctx.Output(framework::GradVarName("X")); + auto* yGrad = ctx.Output(framework::GradVarName("Y")); + if (xGrad != nullptr) xGrad->Resize(xDims); + if (yGrad != nullptr) yGrad->Resize(yDims); } }; diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index b350fd0117..e95364c706 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -20,17 +20,44 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; -template -using EigenMatrix = framework::EigenMatrix; +using EigenTensor = framework::EigenTensor; template -using EigenVector = framework::EigenVector; +using EigenMatrix = framework::EigenMatrix; template class SquaredL2DistanceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { + auto* input0 = context.Input("X"); + const int rank = framework::arity(input0->dims()); + switch (rank) { + case 2: + Operate<2>(context); + break; + case 3: + Operate<3>(context); + break; + case 4: + Operate<4>(context); + break; + case 5: + Operate<5>(context); + break; + case 6: + Operate<6>(context); + break; + default: + // already asserted in SquaredL2DistanceOpMaker + break; + } + } + + private: + template + void Operate(const framework::ExecutionContext& context) const { auto* input0 = context.Input("X"); auto* input1 = context.Input("Y"); auto* output0 = context.Output("sub_result"); @@ -39,17 +66,28 @@ class SquaredL2DistanceKernel : public framework::OpKernel { output0->mutable_data(context.GetPlace()); output1->mutable_data(context.GetPlace()); - auto X = EigenMatrix::From(*input0); - auto Y = EigenMatrix::From(*input1); - auto subResult = EigenMatrix::From(*output0); + auto X = EigenTensor::From(*input0); + auto Y = EigenTensor::From(*input1); + auto subResult = EigenTensor::From(*output0); auto Z = EigenMatrix::From(*output1); + auto xDims = X.dimensions(); + auto yDims = Y.dimensions(); + auto place = context.GetEigenDevice(); + // buffer the substraction result - subResult.device(place) = X - Y; - const auto& inDims = X.dimensions(); + if (yDims[0] == 1 && xDims[0] != yDims[0]) { + auto yBroadcastDims = yDims; + yBroadcastDims[0] = xDims[0]; + subResult.device(place) = X - Y.broadcast(yBroadcastDims); + } else { + subResult.device(place) = X - Y; + } + + // create matrix view for substraction result const auto& subResMat = subResult.reshape(Eigen::array( - {static_cast(inDims[0]), static_cast(X.size() / inDims[0])})); + {static_cast(xDims[0]), static_cast(X.size() / xDims[0])})); Z.device(place) = 
subResMat.pow(2).sum(Eigen::array({1})); } }; @@ -59,24 +97,78 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto* input0 = context.Input("sub_result"); - auto* OG = context.Input(framework::GradVarName("Out")); - auto* IG = context.Output(framework::GradVarName("X")); + const int rank = framework::arity(input0->dims()); + switch (rank) { + case 2: + Operate<2>(context); + break; + case 3: + Operate<3>(context); + break; + case 4: + Operate<4>(context); + break; + case 5: + Operate<5>(context); + break; + case 6: + Operate<6>(context); + break; + default: + // already asserted in SquaredL2DistanceOpMaker + break; + } + } - IG->mutable_data(context.GetPlace()); + private: + template + void Operate(const framework::ExecutionContext& context) const { + auto* input0 = context.Input("sub_result"); + auto* OG = context.Input(framework::GradVarName("Out")); + auto* XG = context.Output(framework::GradVarName("X")); + auto* YG = context.Output(framework::GradVarName("Y")); - auto subResult = EigenMatrix::From(*input0); + auto subResult = EigenTensor::From(*input0); auto outGrad = EigenMatrix::From(*OG); - auto inGrad = EigenMatrix::From(*IG); - const auto& subResDims = subResult.dimensions(); + auto subResDims = subResult.dimensions(); int firstDim = static_cast(subResDims[0]); int cols = subResult.size() / firstDim; const auto subResMat = subResult.reshape(Eigen::array({firstDim, cols})); - // create a matrix view for input gradient tensor - auto inGradMat = inGrad.reshape(Eigen::array({firstDim, cols})); - inGradMat.device(context.GetEigenDevice()) = + + // calculate gradient + auto gradMat = 2 * (outGrad.broadcast(Eigen::array({1, cols}))) * subResMat; + + // propagate back to input + auto eigenPlace = context.GetEigenDevice(); + if (XG != nullptr) { + XG->mutable_data(context.GetPlace()); + auto xGrad = EigenTensor::From(*XG); + // dimensions are same with subResult + auto xGradMat = xGrad.reshape(Eigen::array({firstDim, cols})); + xGradMat.device(eigenPlace) = gradMat; + } + if (YG != nullptr) { + YG->mutable_data(context.GetPlace()); + auto yGrad = EigenTensor::From(*YG); + auto dimsYGrad = yGrad.dimensions(); + auto yGradMat = yGrad.reshape(Eigen::array( + {static_cast(dimsYGrad[0]), + static_cast(yGrad.size() / dimsYGrad[0])})); + + PADDLE_ENFORCE(dimsYGrad[0] <= firstDim, + "First dimension of gradient must be greater or " + "equal than first dimension of target"); + + if (dimsYGrad[0] == firstDim) { + yGradMat.device(eigenPlace) = -1 * gradMat; + } else { + yGradMat.device(eigenPlace) = + -1 * (gradMat.sum(Eigen::array({0}))); + } + } } }; From 6bef079660f689a1b9c061e31c8273de353f98da Mon Sep 17 00:00:00 2001 From: yangyaming Date: Thu, 31 Aug 2017 22:31:34 +0800 Subject: [PATCH 3/6] Follow coding style and move reshaping operation to paddle tensor. 
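
The switch-based rank dispatch is gone: both kernels now build 2-D
Eigen views directly with EigenMatrix::From(tensor, make_ddim(...)),
collapsing every dimension after the first into columns. For reference,
the backward semantics the gradient kernel implements, as a hedged
NumPy sketch (squared_l2_distance_grad_ref is illustrative only, not
project code):

    import numpy as np

    def squared_l2_distance_grad_ref(sub_result, out_grad, x_shape, y_shape):
        n = x_shape[0]
        # out_grad has shape (n, 1) and broadcasts over the columns
        grad_mat = 2.0 * out_grad * sub_result.reshape(n, -1)
        x_grad = grad_mat.reshape(x_shape)
        if y_shape[0] == n:
            y_grad = (-grad_mat).reshape(y_shape)
        else:
            # the target was broadcast, so its gradient is the column sum
            y_grad = (-grad_mat).sum(axis=0).reshape(y_shape)
        return x_grad, y_grad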
--- paddle/operators/squared_l2_distance_op.cc | 47 ++--- paddle/operators/squared_l2_distance_op.h | 170 ++++++------------ .../tests/test_squared_l2_distance_op.py | 10 ++ 3 files changed, 92 insertions(+), 135 deletions(-) diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 3049f0f8ba..b19c274dcc 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -30,22 +30,27 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "Target of SquaredL2DistanceOp " "must be initialized."); - auto* X = ctx.Input("X"); - auto xDims = X->dims(); - auto* Y = ctx.Input("Y"); - auto yDims = Y->dims(); + auto* x = ctx.Input("X"); + auto x_dims = x->dims(); + auto* y = ctx.Input("Y"); + auto y_dims = y->dims(); - PADDLE_ENFORCE_EQ(framework::arity(xDims), framework::arity(yDims), + PADDLE_ENFORCE_EQ(framework::arity(x_dims), framework::arity(y_dims), "Tensor rank of both SquaredL2DistanceOp's " "inputs must be same."); - int rank = framework::arity(xDims); - PADDLE_ENFORCE(rank >= 2 || rank <= 6, "Tensor rank should be in [2, 6]."); - PADDLE_ENFORCE(yDims[0] == 1 || yDims[0] == xDims[0], + + int rank = framework::arity(x_dims); + PADDLE_ENFORCE(rank >= 2, "Tensor rank should be at least equal to 2."); + PADDLE_ENFORCE_EQ(framework::product(x_dims) / x_dims[0], + framework::product(y_dims) / y_dims[0], + "Product of dimensions expcet the first dimension of " + "input and target must be equal."); + PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0], "First dimension of target must be equal to input " "or to 1."); - ctx.Output("sub_result")->Resize(xDims); - ctx.Output("Out")->Resize({xDims[0], 1}); + ctx.Output("sub_result")->Resize(x_dims); + ctx.Output("Out")->Resize({x_dims[0], 1}); } }; @@ -66,8 +71,8 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { input and target. Number of distance value equals to the first dimension of input. First dimension of target could be equal to input or to 1. If the first dimension of target is 1, SquaredL2DistanceOp - will broadcast the first dimension to the first dimension of input. - You can decide whether calculate the gradient of target. + will broadcast target's first dimension to input's first dimension. + You can decide whether calculate the gradient of input and target. 
)DOC"); } }; @@ -81,19 +86,19 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Gradient of Out should not be null"); // check out grad dimensions - auto outDims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto xDims = ctx.Input("X")->dims(); - auto yDims = ctx.Input("Y")->dims(); - PADDLE_ENFORCE_EQ(outDims[0], xDims[0], + auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0], "First dimension of output gradient and " "input value must be equal."); - PADDLE_ENFORCE_EQ(outDims[1], 1, + PADDLE_ENFORCE_EQ(out_dims[1], 1, "Second dimension of output gradient " "must be 1."); - auto* xGrad = ctx.Output(framework::GradVarName("X")); - auto* yGrad = ctx.Output(framework::GradVarName("Y")); - if (xGrad != nullptr) xGrad->Resize(xDims); - if (yGrad != nullptr) yGrad->Resize(yDims); + auto* x_grad = ctx.Output(framework::GradVarName("X")); + auto* y_grad = ctx.Output(framework::GradVarName("Y")); + if (x_grad != nullptr) x_grad->Resize(x_dims); + if (y_grad != nullptr) y_grad->Resize(y_dims); } }; diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index e95364c706..ec8c34ddf8 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -20,9 +20,6 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; -template -using EigenTensor = framework::EigenTensor; template using EigenMatrix = framework::EigenMatrix; @@ -31,64 +28,39 @@ template class SquaredL2DistanceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input0 = context.Input("X"); - const int rank = framework::arity(input0->dims()); - switch (rank) { - case 2: - Operate<2>(context); - break; - case 3: - Operate<3>(context); - break; - case 4: - Operate<4>(context); - break; - case 5: - Operate<5>(context); - break; - case 6: - Operate<6>(context); - break; - default: - // already asserted in SquaredL2DistanceOpMaker - break; - } - } - - private: - template - void Operate(const framework::ExecutionContext& context) const { - auto* input0 = context.Input("X"); - auto* input1 = context.Input("Y"); - auto* output0 = context.Output("sub_result"); - auto* output1 = context.Output("Out"); - - output0->mutable_data(context.GetPlace()); - output1->mutable_data(context.GetPlace()); - - auto X = EigenTensor::From(*input0); - auto Y = EigenTensor::From(*input1); - auto subResult = EigenTensor::From(*output0); - auto Z = EigenMatrix::From(*output1); - - auto xDims = X.dimensions(); - auto yDims = Y.dimensions(); + auto* in0 = context.Input("X"); + auto* in1 = context.Input("Y"); + auto* out0 = context.Output("sub_result"); + auto* out1 = context.Output("Out"); + + auto in0_dims = in0->dims(); + auto in1_dims = in1->dims(); + + int cols = framework::product(in0_dims) / in0_dims[0]; + // reduce dimensions except the first + auto x = + EigenMatrix::From(*in0, framework::make_ddim({in0_dims[0], cols})); + auto y = + EigenMatrix::From(*in1, framework::make_ddim({in1_dims[0], cols})); + + out0->mutable_data(context.GetPlace()); + out1->mutable_data(context.GetPlace()); + auto sub_result = EigenMatrix::From(*out0); + auto z = EigenMatrix::From(*out1); auto place = context.GetEigenDevice(); - + auto x_dims = x.dimensions(); + auto y_dims = 
y.dimensions(); // buffer the substraction result - if (yDims[0] == 1 && xDims[0] != yDims[0]) { - auto yBroadcastDims = yDims; - yBroadcastDims[0] = xDims[0]; - subResult.device(place) = X - Y.broadcast(yBroadcastDims); + if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) { + auto y_broadcast_dims = y_dims; + y_broadcast_dims[0] = x_dims[0]; + sub_result.device(place) = x - y.broadcast(y_broadcast_dims); } else { - subResult.device(place) = X - Y; + sub_result.device(place) = x - y; } - // create matrix view for substraction result - const auto& subResMat = subResult.reshape(Eigen::array( - {static_cast(xDims[0]), static_cast(X.size() / xDims[0])})); - Z.device(place) = subResMat.pow(2).sum(Eigen::array({1})); + z.device(place) = sub_result.pow(2).sum(Eigen::array({1})); } }; @@ -96,77 +68,47 @@ template class SquaredL2DistanceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* input0 = context.Input("sub_result"); - const int rank = framework::arity(input0->dims()); - switch (rank) { - case 2: - Operate<2>(context); - break; - case 3: - Operate<3>(context); - break; - case 4: - Operate<4>(context); - break; - case 5: - Operate<5>(context); - break; - case 6: - Operate<6>(context); - break; - default: - // already asserted in SquaredL2DistanceOpMaker - break; - } - } + auto* in0 = context.Input("sub_result"); + auto* in1 = context.Input(framework::GradVarName("Out")); + auto* x_g = context.Output(framework::GradVarName("X")); + auto* y_g = context.Output(framework::GradVarName("Y")); - private: - template - void Operate(const framework::ExecutionContext& context) const { - auto* input0 = context.Input("sub_result"); - auto* OG = context.Input(framework::GradVarName("Out")); - auto* XG = context.Output(framework::GradVarName("X")); - auto* YG = context.Output(framework::GradVarName("Y")); + auto sub_result = EigenMatrix::From(*in0); + auto out_grad = EigenMatrix::From(*in1); - auto subResult = EigenTensor::From(*input0); - auto outGrad = EigenMatrix::From(*OG); - - auto subResDims = subResult.dimensions(); - int firstDim = static_cast(subResDims[0]); - int cols = subResult.size() / firstDim; - const auto subResMat = - subResult.reshape(Eigen::array({firstDim, cols})); + auto x_dims = x_g->dims(); + auto y_dims = y_g->dims(); + int cols = framework::product(x_dims) / x_dims[0]; // calculate gradient - auto gradMat = - 2 * (outGrad.broadcast(Eigen::array({1, cols}))) * subResMat; + auto grad_mat = + 2 * (out_grad.broadcast(Eigen::array({1, cols}))) * sub_result; // propagate back to input - auto eigenPlace = context.GetEigenDevice(); - if (XG != nullptr) { - XG->mutable_data(context.GetPlace()); - auto xGrad = EigenTensor::From(*XG); + auto eigen_place = context.GetEigenDevice(); + if (x_g != nullptr) { + x_g->mutable_data(context.GetPlace()); + // eigen matrix + auto x_grad = + EigenMatrix::From(*x_g, framework::make_ddim({x_dims[0], cols})); // dimensions are same with subResult - auto xGradMat = xGrad.reshape(Eigen::array({firstDim, cols})); - xGradMat.device(eigenPlace) = gradMat; + x_grad.device(eigen_place) = grad_mat; } - if (YG != nullptr) { - YG->mutable_data(context.GetPlace()); - auto yGrad = EigenTensor::From(*YG); - auto dimsYGrad = yGrad.dimensions(); - auto yGradMat = yGrad.reshape(Eigen::array( - {static_cast(dimsYGrad[0]), - static_cast(yGrad.size() / dimsYGrad[0])})); - - PADDLE_ENFORCE(dimsYGrad[0] <= firstDim, + + if (y_g != nullptr) { + y_g->mutable_data(context.GetPlace()); + auto y_grad = + 
EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); + + PADDLE_ENFORCE(sub_result.dimensions()[0] >= y_dims[0], "First dimension of gradient must be greater or " "equal than first dimension of target"); - if (dimsYGrad[0] == firstDim) { - yGradMat.device(eigenPlace) = -1 * gradMat; + if (sub_result.dimensions()[0] == y_dims[0]) { + y_grad.device(eigen_place) = -1 * grad_mat; } else { - yGradMat.device(eigenPlace) = - -1 * (gradMat.sum(Eigen::array({0}))); + y_grad.device(eigen_place) = + -1 * (grad_mat.sum(Eigen::array({0}))); } } } diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py index eeddb5a3bf..51c95b286a 100644 --- a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py +++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py @@ -21,5 +21,15 @@ class TestSquaredL2DistanceOp(unittest.TestCase): } +class TestSquaredL2DistanceGradOp(GradientChecker): + def test_squared_l2_distance(self): + op = create_op("squared_l2_distance") + inputs = { + 'X': np.random.uniform(0.1, 1., (2, 3)).astype('float32'), + 'Y': np.random.uniform(0.1, 1., (2, 3)).astype('float32') + } + self.check_grad(op, inputs, set(["X", "Y"]), "Out") + + if __name__ == '__main__': unittest.main() From c33ddc74c1062af7585b6d923acbbcc6299335a5 Mon Sep 17 00:00:00 2001 From: yangyaming Date: Fri, 1 Sep 2017 23:00:15 +0800 Subject: [PATCH 4/6] Fix some bugs, add more unittests. --- paddle/operators/squared_l2_distance_op.cc | 8 ++- paddle/operators/squared_l2_distance_op.h | 19 ++--- .../tests/test_squared_l2_distance_op.py | 72 ++++++++++++++++--- 3 files changed, 79 insertions(+), 20 deletions(-) diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index b19c274dcc..694b00e493 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -49,7 +49,9 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "First dimension of target must be equal to input " "or to 1."); - ctx.Output("sub_result")->Resize(x_dims); + ctx.Output("sub_result") + ->Resize({static_cast(x_dims[0]), + static_cast(framework::product(x_dims) / x_dims[0])}); ctx.Output("Out")->Resize({x_dims[0], 1}); } }; @@ -97,8 +99,8 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { "must be 1."); auto* x_grad = ctx.Output(framework::GradVarName("X")); auto* y_grad = ctx.Output(framework::GradVarName("Y")); - if (x_grad != nullptr) x_grad->Resize(x_dims); - if (y_grad != nullptr) y_grad->Resize(y_dims); + if (x_grad) x_grad->Resize(x_dims); + if (y_grad) y_grad->Resize(y_dims); } }; diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index ec8c34ddf8..97907768f7 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -53,14 +53,16 @@ class SquaredL2DistanceKernel : public framework::OpKernel { auto y_dims = y.dimensions(); // buffer the substraction result if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) { - auto y_broadcast_dims = y_dims; - y_broadcast_dims[0] = x_dims[0]; - sub_result.device(place) = x - y.broadcast(y_broadcast_dims); + sub_result.device(place) = + x - + y.broadcast(Eigen::array({static_cast(x_dims[0]), 1})); } else { sub_result.device(place) = x - y; } - - z.device(place) = sub_result.pow(2).sum(Eigen::array({1})); + auto sub_res_pow2 = sub_result * sub_result; + z.device(place) = + 
sub_res_pow2.sum(Eigen::array({1})) + .reshape(Eigen::array({static_cast(x_dims[0]), 1})); } }; @@ -86,7 +88,7 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { // propagate back to input auto eigen_place = context.GetEigenDevice(); - if (x_g != nullptr) { + if (x_g) { x_g->mutable_data(context.GetPlace()); // eigen matrix auto x_grad = @@ -95,7 +97,7 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { x_grad.device(eigen_place) = grad_mat; } - if (y_g != nullptr) { + if (y_g) { y_g->mutable_data(context.GetPlace()); auto y_grad = EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); @@ -107,8 +109,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { if (sub_result.dimensions()[0] == y_dims[0]) { y_grad.device(eigen_place) = -1 * grad_mat; } else { + auto col_sum_res = -1 * (grad_mat.sum(Eigen::array({0}))); y_grad.device(eigen_place) = - -1 * (grad_mat.sum(Eigen::array({0}))); + col_sum_res.reshape(Eigen::array({1, cols})); } } } diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py index 51c95b286a..2bcdf37df4 100644 --- a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py +++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py @@ -4,30 +4,84 @@ from gradient_checker import GradientChecker, create_op import numpy as np -class TestSquaredL2DistanceOp(unittest.TestCase): +class TestSquaredL2DistanceOp_f0(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): self.type = 'squared_l2_distance' self.inputs = { - 'X': np.random.uniform(0.1, 1., (2, 3)).astype('float32'), - 'Y': np.random.uniform(0.1, 1., (2, 3)).astype('float32') + 'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'), + 'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32') } - subRes = self.inputs['X'] - self.inputs['Y'] - output = subRes * subRes + sub_res = self.inputs['X'] - self.inputs['Y'] + output = sub_res * sub_res self.outputs = { - 'sub_result': subRes, + 'sub_result': sub_res, + 'Out': np.expand_dims(output.sum(1), 1) + } + + +class TestSquaredL2DistanceOp_f1(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = 'squared_l2_distance' + self.inputs = { + 'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'), + 'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32') + } + sub_res = self.inputs['X'] - self.inputs['Y'] + output = sub_res * sub_res + self.outputs = { + 'sub_result': sub_res, + 'Out': np.expand_dims(output.sum(1), 1) + } + + +class TestSquaredL2DistanceOp_f2(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = 'squared_l2_distance' + self.inputs = { + 'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'), + 'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32') + } + sub_res = self.inputs['X'] - self.inputs['Y'] + sub_res = sub_res.reshape((32, 64 * 128)) + output = sub_res * sub_res + self.outputs = { + 'sub_result': sub_res, 'Out': np.expand_dims(output.sum(1), 1) } class TestSquaredL2DistanceGradOp(GradientChecker): - def test_squared_l2_distance(self): + def test_squared_l2_distance_b0(self): + op = create_op("squared_l2_distance") + inputs = { + 'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'), + 'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32') + } + self.compare_grad(op, inputs) + self.check_grad(op, inputs, set(["X", "Y"]), "Out") + + def test_squared_l2_distance_b1(self): + op = 
create_op("squared_l2_distance") + inputs = { + 'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'), + 'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32') + } + self.compare_grad(op, inputs) + self.check_grad(op, inputs, set(["X", "Y"]), "Out") + + def test_squared_l2_distance_b2(self): op = create_op("squared_l2_distance") inputs = { - 'X': np.random.uniform(0.1, 1., (2, 3)).astype('float32'), - 'Y': np.random.uniform(0.1, 1., (2, 3)).astype('float32') + 'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'), + 'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32') } + self.compare_grad(op, inputs) self.check_grad(op, inputs, set(["X", "Y"]), "Out") From a377b4197cf7e5f3b7f8edb271eb67039ede16eb Mon Sep 17 00:00:00 2001 From: yangyaming Date: Wed, 6 Sep 2017 17:41:03 +0800 Subject: [PATCH 5/6] Follow GLOG enforcing style. --- paddle/operators/squared_l2_distance_op.cc | 3 +-- paddle/operators/squared_l2_distance_op.h | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 694b00e493..dc30644a5e 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -40,7 +40,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "inputs must be same."); int rank = framework::arity(x_dims); - PADDLE_ENFORCE(rank >= 2, "Tensor rank should be at least equal to 2."); + PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2."); PADDLE_ENFORCE_EQ(framework::product(x_dims) / x_dims[0], framework::product(y_dims) / y_dims[0], "Product of dimensions expcet the first dimension of " @@ -87,7 +87,6 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext& ctx) const override { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Gradient of Out should not be null"); - // check out grad dimensions auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index 1015513bdf..77c5a0a5c9 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -101,9 +101,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { auto y_grad = EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); - PADDLE_ENFORCE(sub_result.dimensions()[0] >= y_dims[0], - "First dimension of gradient must be greater or " - "equal than first dimension of target"); + PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0], + "First dimension of gradient must be greater or " + "equal than first dimension of target."); if (sub_result.dimensions()[0] == y_dims[0]) { y_grad.device(eigen_place) = -1 * grad_mat; From 57f9723d36f1740bc306a8e5022ac3cf01595c2f Mon Sep 17 00:00:00 2001 From: yangyaming Date: Wed, 6 Sep 2017 18:43:33 +0800 Subject: [PATCH 6/6] Using EigenVector to replace EigenMatrix for some variables. 
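
Rank-1 results (z, and y_grad in the broadcast branch) are now written
through EigenVector::Flatten views instead of reshaped matrices, so the
trailing .reshape(...) calls disappear. A rough NumPy analogy of why
the flattened view makes the rank-1 reduction assignable directly
(loose analogy only, assuming contiguous storage):

    import numpy as np

    n, cols = 4, 8
    flat = np.random.rand(n, cols).astype('float32')
    out = np.empty((n, 1), dtype='float32')
    z = out.reshape(-1)             # 1-D view, like EigenVector::Flatten
    z[:] = (flat ** 2).sum(axis=1)  # writes into out without a reshape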
--- paddle/operators/squared_l2_distance_op.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index 77c5a0a5c9..ad3347a0b3 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -20,6 +20,9 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; template using EigenMatrix = framework::EigenMatrix; @@ -46,7 +49,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { out0->mutable_data(context.GetPlace()); out1->mutable_data(context.GetPlace()); auto sub_result = EigenMatrix::From(*out0); - auto z = EigenMatrix::From(*out1); + auto z = EigenVector::Flatten(*out1); auto place = context.GetEigenDevice(); auto x_dims = x.dimensions(); @@ -55,13 +58,12 @@ class SquaredL2DistanceKernel : public framework::OpKernel { if (y_dims[0] == 1 && x_dims[0] > y_dims[0]) { sub_result.device(place) = x - - y.broadcast(Eigen::array({static_cast(x_dims[0]), 1})); + y.broadcast(Eigen::array({{static_cast(x_dims[0]), 1}})); } else { sub_result.device(place) = x - y; } auto sub_res_pow2 = sub_result * sub_result; - // z is TensorMap, no need reshape - z.device(place) = sub_res_pow2.sum(Eigen::array({1})); + z.device(place) = sub_res_pow2.sum(Eigen::array({{1}})); } }; @@ -82,8 +84,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { int cols = framework::product(x_dims) / x_dims[0]; // calculate gradient - auto grad_mat = - 2 * (out_grad.broadcast(Eigen::array({1, cols}))) * sub_result; + auto grad_mat = 2 * + (out_grad.broadcast(Eigen::array({{1, cols}}))) * + sub_result; // propagate back to input auto eigen_place = context.GetEigenDevice(); @@ -98,18 +101,18 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { if (y_g) { y_g->mutable_data(context.GetPlace()); - auto y_grad = - EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0], "First dimension of gradient must be greater or " "equal than first dimension of target."); if (sub_result.dimensions()[0] == y_dims[0]) { + auto y_grad = + EigenMatrix::From(*y_g, framework::make_ddim({y_dims[0], cols})); y_grad.device(eigen_place) = -1 * grad_mat; } else { - auto col_sum_res = -1 * (grad_mat.sum(Eigen::array({0}))); - // y_grad is TensorMap, no need reshape + auto col_sum_res = -1 * (grad_mat.sum(Eigen::array({{0}}))); + auto y_grad = EigenVector::Flatten(*y_g); y_grad.device(eigen_place) = col_sum_res; } }
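
Putting the series together, the broadcast case can be hand-checked
with plain NumPy (a small worked example, separate from the project's
GradientChecker harness):

    import numpy as np

    x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype='float32')
    y = np.array([[0.5, 1.0]], dtype='float32')   # first dim 1 -> broadcast

    sub = x - y                                   # [[0.5, 1.0], [2.5, 3.0]]
    out = (sub ** 2).sum(axis=1, keepdims=True)   # [[1.25], [15.25]]

    out_grad = np.ones_like(out)
    x_grad = 2 * out_grad * sub                   # [[1., 2.], [5., 6.]]
    y_grad = -(2 * out_grad * sub).sum(axis=0, keepdims=True)  # [[-6., -8.]]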