From b63e1c6d8a3e44b68263399f9720165703deccfd Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 14 Aug 2017 11:49:21 +0800 Subject: [PATCH 01/69] "op name" --- paddle/operators/name_convention.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 paddle/operators/name_convention.md diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md new file mode 100644 index 0000000000..da5bcb7485 --- /dev/null +++ b/paddle/operators/name_convention.md @@ -0,0 +1,11 @@ +## Operator Name Convention + +To make the operator document itself more clear. we recommend operator names observe the listing conventions. + +### Input/Output names + +Variable name is uppercase. e.g. `X`, `Y` + +Tensor name is lowercase. e.g. `tensor` + +if only have one output, use `Out` From e9eee6f78559d6318e554b7b5ab021b271d8ddb6 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 15 Aug 2017 09:57:40 +0800 Subject: [PATCH 02/69] "polish words" --- paddle/operators/name_convention.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index da5bcb7485..2260bf5660 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -4,8 +4,12 @@ To make the operator document itself more clear. we recommend operator names obs ### Input/Output names -Variable name is uppercase. e.g. `X`, `Y` +* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with otherone. -Tensor name is lowercase. e.g. `tensor` +* element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. -if only have one output, use `Out` +* we prefer more meaningful input/output name. + +### Best Practice +e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out` +e.g. `cosine` , inputs : `X`, `axis`, outputs : `Out` From 26cec83901dc443a60aef911c1ad2baf882eb474 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 30 Aug 2017 19:54:14 +0800 Subject: [PATCH 03/69] Add pad op --- paddle/operators/CMakeLists.txt | 1 + paddle/operators/pad_op.cc | 77 ++++++++++++++++++ paddle/operators/pad_op.cu | 21 +++++ paddle/operators/pad_op.h | 81 +++++++++++++++++++ paddle/pybind/CMakeLists.txt | 3 +- paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/test_pad_op.py | 32 ++++++++ 7 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 paddle/operators/pad_op.cc create mode 100644 paddle/operators/pad_op.cu create mode 100644 paddle/operators/pad_op.h create mode 100644 python/paddle/v2/framework/tests/test_pad_op.py diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index f466dbc79a..1a759133e1 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -72,3 +72,4 @@ op_library(uniform_random_op SRCS uniform_random_op.cc uniform_random_op.cu) op_library(lookup_table_op SRCS lookup_table_op.cc lookup_table_op.cu) op_library(scale_op SRCS scale_op.cc scale_op.cu DEPS net_op) op_library(minus_op SRCS minus_op.cc minus_op.cu DEPS scale_op) +op_library(pad_op SRCS pad_op.cc pad_op.cu) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc new file mode 100644 index 0000000000..f96d61669b --- /dev/null +++ b/paddle/operators/pad_op.cc @@ -0,0 +1,77 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/pad_op.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class PadOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto dim0 = ctx.Input("X")->dims(); + auto dim1 = ctx.Output("Out")->dims(); + auto paddings = GetAttr>>("paddings"); + for (int i = 0; i < dim0.size(); ++i) { + dim1[i] = dim0[i] + paddings[i][0] + paddings[i][1]; + } + ctx.Output("Out")->Resize(dim1); + } +}; + +class MulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input of pad op"); + AddOutput("Out", "The output of pad op"); + AddComment(R"DOC( +Pad Operator. +)DOC"); + AddAttr>>( + "paddings", "The padding rules for each dimension"); + AddAttr("pad_value", "The value to be padded into tensor") + .SetDefault(0.0f); + } +}; + +class PadOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto x_dims = ctx.Input("X")->dims(); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + + x_grad->Resize(x_dims); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(pad, ops::PadOp, ops::PadOpMaker, pad_grad, ops::PadOpGrad); +REGISTER_OP_CPU_KERNEL(pad, ops::PadKernel); +REGISTER_OP_CPU_KERNEL(pad_grad, + ops::PadGradKernel); diff --git a/paddle/operators/pad_op.cu b/paddle/operators/pad_op.cu new file mode 100644 index 0000000000..555a7dba23 --- /dev/null +++ b/paddle/operators/pad_op.cu @@ -0,0 +1,21 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#define EIGEN_USE_GPU +#include "paddle/operators/pad_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(pad, ops::PadKernel); +REGISTER_OP_GPU_KERNEL(pad_grad, + ops::PadGradKernel); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h new file mode 100644 index 0000000000..6a743bd31c --- /dev/null +++ b/paddle/operators/pad_op.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/operators/math/math_function.h" + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +using EigenTensor = framework::EigenTensor; + +template +class PadKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto paddings = + context.op_.GetAttr>>("paddings"); + T pad_value = context.op_.GetAttr("pad_value"); + + auto* X = context.Input("X"); + auto* Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + + // Eigen::TensorMap> X_tensor = EigenTensor::From(*X); + // Eigen::TensorMap> + // Out_tensor = EigenTensor::From(*Out); + EigenTensor::ConstType X_tensor = + EigenTensor::From(*X); + EigenTensor::Type Out_tensor = + EigenTensor::From(*Out); + Out_tensor = X_tensor.pad(paddings, pad_value); + } +}; + +template +class PadGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + std::vector> paddings = + context.op_.GetAttr>>("paddings"); + for (int i = 0; i < paddings.size(); ++i) { + paddings[0].first = -paddings[0].first; + paddings[1].second = -paddings[1].second; + } + auto* dOut = ctx.Input(framework::GradVarName("Out")); + auto dims = dOut->dims(); + + auto* dX = ctx.Output(framework::GradVarName("X")); + dX->mutable_data(ctx.GetPlace()); + + EigenTensor::Type dX_tensor = + EigenTensor::From(*dX); + EigenTensor::ConstType dOut_tensor = + EigenTensor::From(*dOut); + dX_tensor = dOut_tensor.pad(paddings, 0); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index abb9c248ee..17ef1e8291 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -17,5 +17,6 @@ cc_library(paddle_pybind SHARED fill_zeros_like_op lookup_table_op scale_op - minus_op) + minus_op + pad_op) endif(WITH_PYTHON) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 8fa8be2cef..0176eb7a88 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -47,6 +47,7 @@ USE_OP(scale); USE_OP_ITSELF(identity); USE_OP(minus); USE_CPU_ONLY_OP(gather); +USE_OP(pad); namespace paddle { namespace framework { diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py new file mode 100644 index 0000000000..89ac7e7e1d --- /dev/null +++ 
b/python/paddle/v2/framework/tests/test_pad_op.py @@ -0,0 +1,32 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestPadOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "pad" + self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.attrs['paddings'] = ((0, 1), (2, 3)) + self.attrs['pad_value'] = 0 + self.outputs = { + 'Out': np.pad(self.inputs['X'], + self.attrs['paddings'], + mode='constant', + constant_value=0) + } + + +class PadGradOpTest(GradientChecker): + def test_pad(self): + op = Operator("pad", paddings=((0, 1), (2, 3)), pad_value=0) + inputs = {'X': np.random.random((16, 16)).astype("float32"), } + + self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) + + +if __name__ == '__main__': + unittest.main() From 3eadb42d3d6e5c78b385104b47d5f564b20e3957 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 10:58:23 +0800 Subject: [PATCH 04/69] Fix eigen error. --- paddle/operators/pad_op.cc | 12 +- paddle/operators/pad_op.h | 120 +++++++++++++----- .../paddle/v2/framework/tests/test_pad_op.py | 13 +- 3 files changed, 101 insertions(+), 44 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index f96d61669b..5dee8d0f5e 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -26,18 +26,18 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); - auto dim1 = ctx.Output("Out")->dims(); - auto paddings = GetAttr>>("paddings"); + auto paddings = GetAttr>>("paddings"); + std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i][0] + paddings[i][1]; + dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; } - ctx.Output("Out")->Resize(dim1); + ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); } }; -class MulOpMaker : public framework::OpProtoAndCheckerMaker { +class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of pad op"); AddOutput("Out", "The output of pad op"); diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 6a743bd31c..9a0a064d75 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,52 +28,102 @@ template using EigenTensor = framework::EigenTensor; +template +void PadFunction(const framework::ExecutionContext& context) { + auto pads = context.op_.GetAttr>>("paddings"); + Eigen::array, D> paddings; + for (int i = 0; i < pads.size(); ++i) { + paddings[i] = pads[i]; + } + T pad_value = context.op_.GetAttr("pad_value"); + + auto* X = context.Input("X"); + auto* Out = context.Output("Out"); + Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + + auto X_tensor = EigenTensor::From(*X); + auto Out_tensor = EigenTensor::From(*Out); + auto place = context.GetEigenDevice(); + Out_tensor.device(place) = X_tensor.pad(paddings, pad_value); +} + template class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto paddings = - context.op_.GetAttr>>("paddings"); - T pad_value = context.op_.GetAttr("pad_value"); - - 
auto* X = context.Input("X"); - auto* Out = context.Output("Out"); - Out->mutable_data(context.GetPlace()); - auto dims = X->dims(); - - // Eigen::TensorMap> X_tensor = EigenTensor::From(*X); - // Eigen::TensorMap> - // Out_tensor = EigenTensor::From(*Out); - EigenTensor::ConstType X_tensor = - EigenTensor::From(*X); - EigenTensor::Type Out_tensor = - EigenTensor::From(*Out); - Out_tensor = X_tensor.pad(paddings, pad_value); + int dim = context.Input("X")->dims().size(); + switch (dim) { + case 1: + PadFunction(context); + break; + case 2: + PadFunction(context); + break; + case 3: + PadFunction(context); + break; + case 4: + PadFunction(context); + break; + case 5: + PadFunction(context); + break; + case 6: + PadFunction(context); + break; + default: + LOG(ERROR) << "Only ranks up to 6 supported."; + } } }; +template +void PadGradFunction(const framework::ExecutionContext& context) { + auto pads = context.op_.GetAttr>>("paddings"); + Eigen::array, D> paddings; + for (int i = 0; i < pads.size(); ++i) { + paddings[0].first = -paddings[0].first; + paddings[1].second = -paddings[1].second; + } + auto* dOut = context.Input(framework::GradVarName("Out")); + auto* dX = context.Output(framework::GradVarName("X")); + dX->mutable_data(context.GetPlace()); + + auto dX_tensor = EigenTensor::From(*dX); + auto dOut_tensor = EigenTensor::From(*dOut); + auto place = context.GetEigenDevice(); + dX_tensor.device(place) = dOut_tensor.pad(paddings, 0); +} + template class PadGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override { - std::vector> paddings = - context.op_.GetAttr>>("paddings"); - for (int i = 0; i < paddings.size(); ++i) { - paddings[0].first = -paddings[0].first; - paddings[1].second = -paddings[1].second; + void Compute(const framework::ExecutionContext& context) const override { + size_t dim = + context.Input(framework::GradVarName("Out"))->dims().size(); + switch (dim) { + case 1: + PadGradFunction(context); + break; + case 2: + PadGradFunction(context); + break; + case 3: + PadGradFunction(context); + break; + case 4: + PadGradFunction(context); + break; + case 5: + PadGradFunction(context); + break; + case 6: + PadGradFunction(context); + break; + default: + LOG(ERROR) << "Only ranks up to 6 supported."; } - auto* dOut = ctx.Input(framework::GradVarName("Out")); - auto dims = dOut->dims(); - - auto* dX = ctx.Output(framework::GradVarName("X")); - dX->mutable_data(ctx.GetPlace()); - - EigenTensor::Type dX_tensor = - EigenTensor::From(*dX); - EigenTensor::ConstType dOut_tensor = - EigenTensor::From(*dOut); - dX_tensor = dOut_tensor.pad(paddings, 0); } }; diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 89ac7e7e1d..b862033d8c 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -1,5 +1,6 @@ import unittest import numpy as np +from paddle.v2.framework.op import Operator from gradient_checker import GradientChecker, create_op from op_test_util import OpTestMeta @@ -10,19 +11,25 @@ class TestPadOp(unittest.TestCase): def setUp(self): self.type = "pad" self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } - self.attrs['paddings'] = ((0, 1), (2, 3)) + self.attrs = {} + self.attrs['paddings'] = [(0, 1), (2, 3)] self.attrs['pad_value'] = 0 self.outputs = { 'Out': np.pad(self.inputs['X'], self.attrs['paddings'], mode='constant', - constant_value=0) + constant_values=0) } class 
PadGradOpTest(GradientChecker): def test_pad(self): - op = Operator("pad", paddings=((0, 1), (2, 3)), pad_value=0) + op = Operator( + type="pad", + X="X", + Out="Out", + paddings=[(0, 1), (2, 3)], + pad_value=0) inputs = {'X': np.random.random((16, 16)).astype("float32"), } self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) From 9f8e4981384d247e461290d7ceb642486663390d Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 11:59:10 +0800 Subject: [PATCH 05/69] Fix some issues. --- paddle/operators/pad_op.cc | 3 +++ paddle/operators/pad_op.h | 10 +++++----- python/paddle/v2/framework/op.py | 2 +- python/paddle/v2/framework/tests/test_pad_op.py | 15 ++++++++++----- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 5dee8d0f5e..dac1c56bdd 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,6 +27,9 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); auto paddings = GetAttr>>("paddings"); + PADDLE_ENFORCE_EQ( + dim0.size(), paddings.size(), + "Paddings size should be equal to dimension size of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 9a0a064d75..234019394c 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -14,8 +14,6 @@ #pragma once -#include "paddle/operators/math/math_function.h" - #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" @@ -30,12 +28,13 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = context.op_.GetAttr>>("paddings"); + auto pads = + context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { paddings[i] = pads[i]; } - T pad_value = context.op_.GetAttr("pad_value"); + T pad_value = context.op().GetAttr("pad_value"); auto* X = context.Input("X"); auto* Out = context.Output("Out"); @@ -80,7 +79,8 @@ class PadKernel : public framework::OpKernel { template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = context.op_.GetAttr>>("paddings"); + auto pads = + context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { paddings[0].first = -paddings[0].first; diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 0349407a85..359ccec814 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -96,7 +96,7 @@ class OpDescCreationMethod(object): new_attr.strings.extend(user_defined_attr) elif attr.type == framework_pb2.INT_PAIRS: for p in user_defined_attr: - pair = new_attr.pairs.add() + pair = new_attr.int_pairs.add() pair.first = p[0] pair.second = p[1] else: diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index b862033d8c..10aeaa752f 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -22,17 +22,22 @@ class TestPadOp(unittest.TestCase): } -class PadGradOpTest(GradientChecker): - def test_pad(self): - op = Operator( +class TestPadGradOp(GradientChecker): + def setUp(self): + self.op = Operator( type="pad", X="X", Out="Out", 
paddings=[(0, 1), (2, 3)], pad_value=0) - inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + + def test_normal(self): + self.check_grad( + self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5) - self.check_grad(op, inputs, set(["X"]), "Out", max_relative_error=0.5) + def test_cpu_gpu_compare(self): + self.compare_grad(self.op, self.inputs) if __name__ == '__main__': From 7c30251d165ee9b3b9fd4fbd2440824ebcfbb5d7 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 6 Sep 2017 13:10:52 +0800 Subject: [PATCH 06/69] Fix padding attribute error. --- paddle/operators/pad_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 234019394c..ed547d0a7f 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -83,8 +83,8 @@ void PadGradFunction(const framework::ExecutionContext& context) { context.op().GetAttr>>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < pads.size(); ++i) { - paddings[0].first = -paddings[0].first; - paddings[1].second = -paddings[1].second; + paddings[i].first = -pads[i].first; + paddings[i].second = -pads[i].second; } auto* dOut = context.Input(framework::GradVarName("Out")); auto* dX = context.Output(framework::GradVarName("X")); From 12eaa22ad2d099717e6ddf2da856b67b6d887510 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Wed, 6 Sep 2017 21:25:58 -0700 Subject: [PATCH 07/69] add reshape operator --- paddle/operators/reshape_op.cc | 84 +++++++++++++++++++ paddle/operators/reshape_op.cu | 22 +++++ paddle/operators/reshape_op.h | 60 +++++++++++++ paddle/pybind/pybind.cc | 1 + .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../v2/framework/tests/test_reshape_op.py | 28 +++++++ 6 files changed, 196 insertions(+) create mode 100644 paddle/operators/reshape_op.cc create mode 100644 paddle/operators/reshape_op.cu create mode 100644 paddle/operators/reshape_op.h create mode 100644 python/paddle/v2/framework/tests/test_reshape_op.py diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc new file mode 100644 index 0000000000..1b073a79bc --- /dev/null +++ b/paddle/operators/reshape_op.cc @@ -0,0 +1,84 @@ + +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/reshape_op.h" + +namespace paddle { +namespace operators { + +class ReshapeOp : public framework::OperatorWithKernel { + public: + ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto *in = ctx.Input("X"); + auto shape = ctx.Attr>("shape"); + PADDLE_ENFORCE_EQ((unsigned)shape.size(), in->dims().size(), + "The dimension of Input(X) mismatches with Attr(shape)."); + size_t shape_size = 1; + for (auto dim : shape) { + shape_size *= dim; + } + size_t in_size = framework::product(in->dims()); + PADDLE_ENFORCE_EQ(shape_size, in_size, + "The size of Input(X) mismatches with Attr(shape)."); + } +}; + +class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReshapeOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The input tensor of reshape operator."); + AddOutput("Out", "The output tensor of reshape operator."); + AddAttr>("shape", "Target shape of reshape operator."); + AddComment(R"DOC(Reshape operator + +The input tensor will be reshaped with Attr(shape). +)DOC"); + } +}; + +class ReshapeGradOp : public framework::OperatorWithKernel { + public: + ReshapeGradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto dims = ctx.Input("X")->dims(); + auto *d_in = ctx.Output(framework::GradVarName("X")); + d_in->Resize(dims); + } +}; + +} // namespace operators +} // namespace paddle +namespace ops = paddle::operators; + +REGISTER_OP(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, reshape_grad, + ops::ReshapeGradOp); +REGISTER_OP_CPU_KERNEL(reshape, + ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL( + reshape_grad, ops::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.cu b/paddle/operators/reshape_op.cu new file mode 100644 index 0000000000..23dbe089d3 --- /dev/null +++ b/paddle/operators/reshape_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/operators/reshape_op.h" + +REGISTER_OP_GPU_KERNEL( + reshape, + paddle::operators::ReshapeKernel); +REGISTER_OP_GPU_KERNEL( + reshape_grad, + paddle::operators::ReshapeGradKernel); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h new file mode 100644 index 0000000000..22ede88b12 --- /dev/null +++ b/paddle/operators/reshape_op.h @@ -0,0 +1,60 @@ + +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class ReshapeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); + out->mutable_data(in->place()); + + auto shape = ctx.Attr>("shape"); + std::vector tmp; + for (auto dim : shape) { + tmp.push_back(dim); + } + auto out_dims = framework::make_ddim(tmp); + out->CopyFrom(*in, ctx.GetPlace()); + out->Resize(out_dims); + } +}; + +template +class ReshapeGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); + d_x->mutable_data(ctx.GetPlace()); + + auto in_dims = d_x->dims(); + + d_x->CopyFrom(*d_out, ctx.GetPlace()); + d_x->Resize(in_dims); + } +}; +} +} diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index c21ad3470b..bf1a321c3f 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -50,6 +50,7 @@ USE_OP(cos_sim); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); USE_OP(squared_l2_distance); +USE_OP(reshape); namespace paddle { namespace framework { diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index a9c33ea163..9d41b84e57 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -34,3 +34,4 @@ py_test(test_lookup_table SRCS test_lookup_table.py) py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) py_test(mnist SRCS mnist.py) py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) +py_test(test_reshape_op SRCS test_reshape_op.py) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py new file mode 100644 index 0000000000..c101b0df9a --- /dev/null +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -0,0 +1,28 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestReshapeOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "reshape" + self.inputs = {'X': np.random.random((2, 4)).astype("float32"), } + print self.inputs + self.attrs = {'shape': [4, 2]} + self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + print self.outputs + + +class ReshapeGradOpTest(GradientChecker): + def test_normal(self): + op = create_op("reshape") + inputs = {"X": np.random.random((2, 4)).astype("float32")} + attrs = {'shape': [4, 2]} + self.check_grad(op, inputs, attrs, set("X"), "Out") + + +if __name__ == '__main__': + unittest.main() From bea82122dd3c66e3a4cd69939a7ac68f7cce9524 Mon Sep 17 00:00:00 2001 
From: dangqingqing Date: Thu, 7 Sep 2017 15:29:42 +0800 Subject: [PATCH 08/69] Expose LoDTensor to pybind. --- paddle/pybind/pybind.cc | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index ba28b51ade..0b9d2697d2 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "paddle/framework/backward.h" +#include "paddle/framework/lod_tensor.h" #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -54,6 +55,7 @@ namespace paddle { namespace framework { using Tensor = framework::Tensor; +using LODTensor = framework::LODTensor; static size_t UniqueIntegerGenerator() { static std::atomic generator; @@ -113,6 +115,25 @@ PYBIND11_PLUGIN(core) { return self.data()[offset]; }); + py::class_(m, "LODTensor", R"DOC(LOD(Leval of Ddetails) Tensor. + +The tensor and LOD info should be created before creating the LODTensor, then +call the set_tensor and set_lod functions to set them. + +)DOC") + .def("set_tensor", + [](LODTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) + .def("set_lod", + [](LODTensor &self, std::vector> &lod) { + self.set_lod(lod); + }) + .def("get_tensor", + [](LODTensor &self) -> Tensor & { return self.tensor(); }, + py::return_value_policy::reference) + .def("get_lod", [](LODTensor &self) -> std::vector> { + return self.lod(); + }); + py::class_(m, "Variable", R"DOC(Variable Class. All parameter, weight, gradient are variables in Paddle. @@ -124,6 +145,11 @@ All parameter, weight, gradient are variables in Paddle. .def("get_tensor", [](Variable &self) -> Tensor * { return self.GetMutable(); }, py::return_value_policy::reference) + .def("get_lod_tensor", + [](Variable &self) -> LODTensor * { + return self.GetMutable(); + }, + py::return_value_policy::reference) .def("get_net", [](Variable &self) -> operators::NetOp * { return self.GetMutable(); From 899c7d6b353c04565ebaa46d85de57348631f2e1 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Thu, 7 Sep 2017 04:16:32 -0700 Subject: [PATCH 09/69] pass unit test --- paddle/operators/reshape_op.cc | 3 ++- paddle/operators/reshape_op.h | 7 +++---- .../paddle/v2/framework/tests/test_reshape_op.py | 15 ++++++--------- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 1b073a79bc..d75ec76632 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -38,6 +38,7 @@ class ReshapeOp : public framework::OperatorWithKernel { size_t in_size = framework::product(in->dims()); PADDLE_ENFORCE_EQ(shape_size, in_size, "The size of Input(X) mismatches with Attr(shape)."); + ctx.Output("Out")->Resize(in->dims()); } }; @@ -51,7 +52,7 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr>("shape", "Target shape of reshape operator."); AddComment(R"DOC(Reshape operator -The input tensor will be reshaped with Attr(shape). +Reshape Input(X) into the shape specified by Attr(shape). 
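+For example, an Input(X) of shape [2, 4] reshaped with Attr(shape) = [4, 2] keeps the same 8 elements laid out as a [4, 2] tensor; the total element count must stay unchanged.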
)DOC"); } }; diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 22ede88b12..61d502c836 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -23,13 +23,13 @@ namespace operators { using Tensor = framework::Tensor; -template +template class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* out = ctx.Output("Out"); auto* in = ctx.Input("X"); - out->mutable_data(in->place()); + out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); std::vector tmp; @@ -42,7 +42,7 @@ class ReshapeKernel : public framework::OpKernel { } }; -template +template class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { @@ -51,7 +51,6 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); - d_x->CopyFrom(*d_out, ctx.GetPlace()); d_x->Resize(in_dims); } diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index c101b0df9a..4797019435 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -1,6 +1,6 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op +from gradient_checker import GradientChecker, Operator from op_test_util import OpTestMeta @@ -9,19 +9,16 @@ class TestReshapeOp(unittest.TestCase): def setUp(self): self.type = "reshape" - self.inputs = {'X': np.random.random((2, 4)).astype("float32"), } - print self.inputs - self.attrs = {'shape': [4, 2]} + self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } + self.attrs = {'shape': [51, 37]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} - print self.outputs class ReshapeGradOpTest(GradientChecker): def test_normal(self): - op = create_op("reshape") - inputs = {"X": np.random.random((2, 4)).astype("float32")} - attrs = {'shape': [4, 2]} - self.check_grad(op, inputs, attrs, set("X"), "Out") + op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) + inputs = {"X": np.random.random((10, 20)).astype("float32")} + self.check_grad(op, inputs, set("X"), "Out") if __name__ == '__main__': From a2a69f2a54cd7588ede6846deac758e8e8dc6b6e Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Thu, 7 Sep 2017 20:33:48 +0800 Subject: [PATCH 10/69] Add function to get element count from tensor. --- paddle/framework/tensor.h | 6 ++++++ paddle/framework/tensor_impl.h | 13 ++++++++----- paddle/operators/cos_sim_op.h | 4 ++-- paddle/operators/gaussian_random_op.cc | 2 +- paddle/operators/gaussian_random_op.cu | 4 ++-- paddle/operators/lookup_table_op.cu | 4 ++-- paddle/operators/lookup_table_op.h | 4 ++-- paddle/operators/mean_op.h | 5 ++--- paddle/operators/minus_op.cc | 3 +-- paddle/operators/squared_l2_distance_op.cc | 6 ++---- paddle/operators/squared_l2_distance_op.h | 4 ++-- paddle/operators/uniform_random_op.cc | 2 +- paddle/operators/uniform_random_op.cu | 4 ++-- 13 files changed, 33 insertions(+), 28 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index 643f875491..fc54ed697f 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -78,6 +78,9 @@ class Tensor { /*! Return the dimensions of the memory block. */ inline const DDim& dims() const; + /*! Return the numel of the memory block. */ + inline int64_t numel() const; + /*! 
Resize the dimensions of the memory block. */ inline Tensor& Resize(const DDim& dims); @@ -159,6 +162,9 @@ class Tensor { /*! points to dimensions of memory block. */ DDim dims_; + /*! the element count of tensor. */ + int64_t numel_; + /** * @brief A PlaceHolder may be shared by more than one tensor. * diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 94f436294f..03678784b4 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), product(dims_) * sizeof(T) + offset_, + holder_->size(), numel_ * sizeof(T) + offset_, "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE_GT(product(dims_), 0, + PADDLE_ENFORCE_GT(numel_, 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ - int64_t size = product(dims_) * sizeof(T); + int64_t size = numel_ * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { @@ -97,7 +97,7 @@ inline void Tensor::CopyFrom(const Tensor& src, auto dst_ptr = static_cast(mutable_data(dst_place)); - auto size = product(src.dims_) * sizeof(T); + auto size = src.numel() * sizeof(T); if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { memory::Copy(boost::get(dst_place), dst_ptr, @@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LT(begin_idx, end_idx, "Begin index must be less than end index."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); - size_t base = product(dims_) / dims_[0]; + size_t base = numel_ / dims_[0]; Tensor dst; dst.holder_ = holder_; DDim dst_dims = dims_; @@ -143,10 +143,13 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { inline Tensor& Tensor::Resize(const DDim& dims) { dims_ = dims; + numel_ = product(dims_); return *this; } inline const DDim& Tensor::dims() const { return dims_; } +inline int64_t Tensor::numel() const { return numel_; } + } // namespace framework } // namespace paddle diff --git a/paddle/operators/cos_sim_op.h b/paddle/operators/cos_sim_op.h index 9e2bcebe3b..0dc5099525 100644 --- a/paddle/operators/cos_sim_op.h +++ b/paddle/operators/cos_sim_op.h @@ -42,7 +42,7 @@ class CosSimKernel : public framework::OpKernel { output_y_norm->mutable_data(context.GetPlace()); auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); + int64_t size = input_x->numel(); auto new_dims = framework::make_ddim({dims[0], size / dims[0]}); auto x = EigenMatrix::From(*input_x, new_dims); auto y = EigenMatrix::From(*input_y, new_dims); @@ -72,7 +72,7 @@ class CosSimGradKernel : public framework::OpKernel { auto* input_grad_z = context.Input(framework::GradVarName("Out")); auto dims = input_x->dims(); - int size = static_cast(framework::product(dims)); + int64_t size = input_x->numel(); auto new_dims = 
framework::make_ddim({dims[0], size / dims[0]}); auto x = EigenMatrix::From(*input_x, new_dims); auto y = EigenMatrix::From(*input_y, new_dims); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index 6574880c0e..3d76516405 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -31,7 +31,7 @@ class CPUGaussianRandomKernel : public framework::OpKernel { } engine.seed(seed); std::normal_distribution dist(mean, std); - int64_t size = framework::product(tensor->dims()); + int64_t size = tensor->numel(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/operators/gaussian_random_op.cu index d9dbc1dcfe..2d63b30499 100644 --- a/paddle/operators/gaussian_random_op.cu +++ b/paddle/operators/gaussian_random_op.cu @@ -50,8 +50,8 @@ class GPUGaussianRandomKernel : public framework::OpKernel { T mean = static_cast(context.Attr("mean")); T std = static_cast(context.Attr("std")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), GaussianGenerator(mean, std, seed)); } diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 27eee3436a..7083440467 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -70,7 +70,7 @@ class LookupTableCUDAKernel : public framework::OpKernel { size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; - size_t K = product(ids_t->dims()); + size_t K = ids_t->numel(); auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); @@ -91,7 +91,7 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; - int K = product(ids_t->dims()); + int K = ids_t->numel(); const int32_t* ids = ids_t->data(); const T* d_output = d_output_t->data(); T* d_table = d_table_t->mutable_data(context.GetPlace()); diff --git a/paddle/operators/lookup_table_op.h b/paddle/operators/lookup_table_op.h index 877b36cef4..a1298906dd 100644 --- a/paddle/operators/lookup_table_op.h +++ b/paddle/operators/lookup_table_op.h @@ -35,7 +35,7 @@ class LookupTableKernel : public framework::OpKernel { auto ids = ids_t->data(); auto table = table_t->data(); auto output = output_t->mutable_data(context.GetPlace()); - for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); memcpy(output + i * D, table + ids[i] * D, D * sizeof(T)); @@ -61,7 +61,7 @@ class LookupTableGradKernel : public framework::OpKernel { t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); - for (ssize_t i = 0; i < product(ids_t->dims()); ++i) { + for (int64_t i = 0; i < ids_t->numel(); ++i) { PADDLE_ENFORCE_LT(ids[i], N); PADDLE_ENFORCE_GE(ids[i], 0); for (int j = 0; j < D; ++j) { diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h index 9848af280b..ce31e178d8 100644 --- a/paddle/operators/mean_op.h +++ b/paddle/operators/mean_op.h @@ -49,12 +49,11 @@ class MeanGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { auto OG = 
context.Input(framework::GradVarName("Out")); - PADDLE_ENFORCE(framework::product(OG->dims()) == 1, - "Mean Gradient should be scalar"); + PADDLE_ENFORCE(OG->numel() == 1, "Mean Gradient should be scalar"); auto IG = context.Output(framework::GradVarName("X")); IG->mutable_data(context.GetPlace()); - T ig_size = (T)framework::product(IG->dims()); + T ig_size = static_cast(IG->numel()); Eigen::DSizes bcast(ig_size); EigenVector::Flatten(*IG).device(context.GetEigenDevice()) = diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index 069fb5e1ab..a4876feb2e 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -31,8 +31,7 @@ class MinusOp : public framework::OperatorWithKernel { auto *right_tensor = ctx.Input("Y"); PADDLE_ENFORCE_EQ( - framework::product(left_tensor->dims()), - framework::product(right_tensor->dims()), + left_tensor->numel(), right_tensor->numel(), "Minus operator must take two tensor with same num of elements"); ctx.Output("Out")->Resize(left_tensor->dims()); } diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index dc30644a5e..9f51d3efa8 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -41,8 +41,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { int rank = framework::arity(x_dims); PADDLE_ENFORCE_GE(rank, 2, "Tensor rank should be at least equal to 2."); - PADDLE_ENFORCE_EQ(framework::product(x_dims) / x_dims[0], - framework::product(y_dims) / y_dims[0], + PADDLE_ENFORCE_EQ(x->numel() / x_dims[0], y->numel() / y_dims[0], "Product of dimensions expcet the first dimension of " "input and target must be equal."); PADDLE_ENFORCE(y_dims[0] == 1 || y_dims[0] == x_dims[0], @@ -50,8 +49,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "or to 1."); ctx.Output("sub_result") - ->Resize({static_cast(x_dims[0]), - static_cast(framework::product(x_dims) / x_dims[0])}); + ->Resize({x_dims[0], x->numel() / x_dims[0]}); ctx.Output("Out")->Resize({x_dims[0], 1}); } }; diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/operators/squared_l2_distance_op.h index ad3347a0b3..097ac04fc0 100644 --- a/paddle/operators/squared_l2_distance_op.h +++ b/paddle/operators/squared_l2_distance_op.h @@ -39,7 +39,7 @@ class SquaredL2DistanceKernel : public framework::OpKernel { auto in0_dims = in0->dims(); auto in1_dims = in1->dims(); - int cols = framework::product(in0_dims) / in0_dims[0]; + int cols = in0->numel() / in0_dims[0]; // reduce dimensions except the first auto x = EigenMatrix::From(*in0, framework::make_ddim({in0_dims[0], cols})); @@ -82,7 +82,7 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel { auto x_dims = x_g->dims(); auto y_dims = y_g->dims(); - int cols = framework::product(x_dims) / x_dims[0]; + int cols = x_g->numel() / x_dims[0]; // calculate gradient auto grad_mat = 2 * (out_grad.broadcast(Eigen::array({{1, cols}}))) * diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index f2aeef6c31..b8fbc9b52a 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -35,7 +35,7 @@ class CPUUniformRandomKernel : public framework::OpKernel { std::uniform_real_distribution dist( static_cast(context.Attr("min")), static_cast(context.Attr("max"))); - int64_t size = framework::product(tensor->dims()); + int64_t size = tensor->numel(); for (int64_t i = 0; i < size; ++i) { data[i] = dist(engine); } 
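// Illustrative sketch (not from the original commit): with the Tensor::numel() accessor added above,
// kernels can query the element count directly instead of computing framework::product(tensor->dims()), e.g.
//   paddle::framework::Tensor t;
//   t.Resize(paddle::framework::make_ddim({2, 3, 4}));
//   int64_t n = t.numel();  // 24, identical to framework::product(t.dims())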
diff --git a/paddle/operators/uniform_random_op.cu b/paddle/operators/uniform_random_op.cu index c2c041b144..6614b53b3f 100644 --- a/paddle/operators/uniform_random_op.cu +++ b/paddle/operators/uniform_random_op.cu @@ -53,8 +53,8 @@ class GPUUniformRandomKernel : public framework::OpKernel { T min = static_cast(context.Attr("min")); T max = static_cast(context.Attr("max")); thrust::counting_iterator index_sequence_begin(0); - ssize_t N = framework::product(tensor->dims()); - thrust::transform(index_sequence_begin, index_sequence_begin + N, + int64_t size = tensor->numel(); + thrust::transform(index_sequence_begin, index_sequence_begin + size, thrust::device_ptr(data), UniformGenerator(min, max, seed)); } From 0910a9bac4d78347e06ecb90ad4e031bd6c7b0bd Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 8 Sep 2017 16:39:59 +0800 Subject: [PATCH 11/69] Refine pad op 1. Rename variables by Google style. 2. Add more test cases. 3. Add more detail and meaningful comments. 4. Change type of "padding" to vector --- paddle/operators/pad_op.cc | 44 ++++++++--- paddle/operators/pad_op.h | 49 ++++++------- .../paddle/v2/framework/tests/test_pad_op.py | 73 ++++++++++++++++--- 3 files changed, 122 insertions(+), 44 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index dac1c56bdd..94a6d20583 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -26,13 +26,13 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); - auto paddings = GetAttr>>("paddings"); + auto paddings = GetAttr>("paddings"); PADDLE_ENFORCE_EQ( - dim0.size(), paddings.size(), + dim0.size(), (int)(paddings.size() / 2), "Paddings size should be equal to dimension size of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i].first + paddings[i].second; + dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; } ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); } @@ -42,14 +42,40 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of pad op"); - AddOutput("Out", "The output of pad op"); + AddInput("X", "The input of pad op."); + AddOutput("Out", "The output of pad op."); AddComment(R"DOC( -Pad Operator. +Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: + +Given: + +X = [[1, 2], + [3, 4]] + +and + +paddings = [(0,1),(1,2)] + +and + +pad_value = 0 + +then we get + +Out = [[0, 1, 2, 0, 0] + [0, 3, 4, 0, 0] + [0, 0, 0, 0, 0]] )DOC"); - AddAttr>>( - "paddings", "The padding rules for each dimension"); - AddAttr("pad_value", "The value to be padded into tensor") + AddAttr>( + "paddings", + "A pair list to describes padding rules for each dimension." + " For 2-D image tensor, paddings=[(0, 1), (2, 3)] means" + " padding 0 row to top, 1 row to bottom, 2 columns to left" + " and 3 columns to right.Paddings size should be equal to" + " dimension size of input tensor."); + AddAttr("pad_value", + "(float) default to 0; " + "The value to be padded into tensor. 
") .SetDefault(0.0f); } }; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index ed547d0a7f..dcf957b47e 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,23 +28,23 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i] = pads[i]; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = pads[i * 2]; + paddings[i].second = pads[i * 2 + 1]; } - T pad_value = context.op().GetAttr("pad_value"); + T pad_value = context.GetAttr("pad_value"); - auto* X = context.Input("X"); - auto* Out = context.Output("Out"); - Out->mutable_data(context.GetPlace()); - auto dims = X->dims(); + auto* x = context.Input("X"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + auto dims = x->dims(); - auto X_tensor = EigenTensor::From(*X); - auto Out_tensor = EigenTensor::From(*Out); + auto x_tensor = EigenTensor::From(*x); + auto out_tensor = EigenTensor::From(*out); auto place = context.GetEigenDevice(); - Out_tensor.device(place) = X_tensor.pad(paddings, pad_value); + out_tensor.device(place) = x_tensor.pad(paddings, pad_value); } template @@ -72,28 +72,27 @@ class PadKernel : public framework::OpKernel { PadFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = - context.op().GetAttr>>("paddings"); + auto pads = context.GetAttr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < pads.size(); ++i) { - paddings[i].first = -pads[i].first; - paddings[i].second = -pads[i].second; + for (int i = 0; i < paddings.size(); ++i) { + paddings[i].first = -pads[i * 2]; + paddings[i].second = -pads[i * 2 + 1]; } - auto* dOut = context.Input(framework::GradVarName("Out")); - auto* dX = context.Output(framework::GradVarName("X")); - dX->mutable_data(context.GetPlace()); + auto* d_out = context.Input(framework::GradVarName("Out")); + auto* d_x = context.Output(framework::GradVarName("X")); + d_x->mutable_data(context.GetPlace()); - auto dX_tensor = EigenTensor::From(*dX); - auto dOut_tensor = EigenTensor::From(*dOut); + auto d_x_tensor = EigenTensor::From(*d_x); + auto d_out_tensor = EigenTensor::From(*d_out); auto place = context.GetEigenDevice(); - dX_tensor.device(place) = dOut_tensor.pad(paddings, 0); + d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); } template @@ -122,7 +121,7 @@ class PadGradKernel : public framework::OpKernel { PadGradFunction(context); break; default: - LOG(ERROR) << "Only ranks up to 6 supported."; + PADDLE_THROW("Only ranks up to 6 supported."); } } }; diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 10aeaa752f..56b9c88f7d 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -9,36 +9,89 @@ class TestPadOp(unittest.TestCase): __metaclass__ = OpTestMeta def setUp(self): + self.initTestCase() self.type = "pad" - self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + self.inputs = {'X': np.random.random(self.shape).astype("float32"), } self.attrs = {} - self.attrs['paddings'] = [(0, 1), (2, 3)] - self.attrs['pad_value'] = 0 + self.attrs['paddings'] 
= np.array(self.paddings).flatten() + self.attrs['pad_value'] = self.pad_value self.outputs = { 'Out': np.pad(self.inputs['X'], - self.attrs['paddings'], + self.paddings, mode='constant', - constant_values=0) + constant_values=self.pad_value) } + def initTestCase(self): + self.shape = (16, 16) + self.paddings = [(0, 1), (2, 3)] + self.pad_value = 0 + + +class TestCase1(TestPadOp): + def initTestCase(self): + self.shape = (2, 3, 4, 4) + self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] + self.pad_value = 0.5 + + +class TestCase2(TestPadOp): + def initTestCase(self): + self.shape = (2, 2, 2) + self.paddings = [(0, 0), (0, 0), (1, 2)] + self.pad_value = 1 + + +class TestCase3(TestPadOp): + def initTestCase(self): + self.shape = (8) + self.paddings = [(0, 1)] + self.pad_value = 0.9 + class TestPadGradOp(GradientChecker): def setUp(self): + self.initTestCase() self.op = Operator( type="pad", X="X", Out="Out", - paddings=[(0, 1), (2, 3)], - pad_value=0) - self.inputs = {'X': np.random.random((16, 16)).astype("float32"), } + paddings=np.array(self.paddings).flatten(), + pad_value=self.pad_value) + self.inputs = {'X': np.random.random(self.shape).astype("float32"), } + + def initTestCase(self): + self.shape = (16, 16) + self.paddings = [(0, 1), (2, 3)] + self.pad_value = 0 def test_normal(self): - self.check_grad( - self.op, self.inputs, set(["X"]), "Out", max_relative_error=0.5) + self.check_grad(self.op, self.inputs, set(["X"]), "Out") def test_cpu_gpu_compare(self): self.compare_grad(self.op, self.inputs) +class TestiGradCase1(TestPadOp): + def initTestCase(self): + self.shape = (2, 3, 4, 4) + self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] + self.pad_value = 0.5 + + +class TestGradCase2(TestPadOp): + def initTestCase(self): + self.shape = (2, 2, 2) + self.paddings = [(0, 0), (0, 0), (1, 2)] + self.pad_value = 1 + + +class TestGradCase3(TestPadOp): + def initTestCase(self): + self.shape = (8) + self.paddings = [(0, 1)] + self.pad_value = 0.9 + + if __name__ == '__main__': unittest.main() From d960cbdcf3f162c0da17fd04c8bc8eb770c9965b Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 8 Sep 2017 16:48:39 +0800 Subject: [PATCH 12/69] Fix comment --- paddle/operators/pad_op.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 94a6d20583..6ea2a25f0b 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,9 +27,9 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto dim0 = ctx.Input("X")->dims(); auto paddings = GetAttr>("paddings"); - PADDLE_ENFORCE_EQ( - dim0.size(), (int)(paddings.size() / 2), - "Paddings size should be equal to dimension size of input tensor."); + PADDLE_ENFORCE_EQ(dim0.size(), (int)(paddings.size() / 2), + "Size of paddings should be equal to 2 * dimension size " + "of input tensor."); std::vector dim1(dim0.size()); for (int i = 0; i < dim0.size(); ++i) { dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; @@ -54,7 +54,7 @@ X = [[1, 2], and -paddings = [(0,1),(1,2)] +paddings = [0, 1, 1, 2] and @@ -68,11 +68,11 @@ Out = [[0, 1, 2, 0, 0] )DOC"); AddAttr>( "paddings", - "A pair list to describes padding rules for each dimension." - " For 2-D image tensor, paddings=[(0, 1), (2, 3)] means" + "A list to describes padding rules for each dimension." 
+ " For 2-D image tensor, paddings=[0, 1, 2, 3] means" " padding 0 row to top, 1 row to bottom, 2 columns to left" - " and 3 columns to right.Paddings size should be equal to" - " dimension size of input tensor."); + " and 3 columns to right.Size of paddings should be equal to" + " 2 * dimension size of input tensor."); AddAttr("pad_value", "(float) default to 0; " "The value to be padded into tensor. ") From fa5cb7104d7a8bce56a446eb352037302f93ae70 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 9 Sep 2017 17:40:38 +0800 Subject: [PATCH 13/69] fix switchOrderLayer --- python/paddle/trainer/config_parser.py | 4 ++-- python/paddle/trainer_config_helpers/layers.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 7e9112b43b..356e1d8b6f 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3748,8 +3748,8 @@ class SwitchOrderLayer(LayerBase): def __init__(self, name, inputs, reshape, **xargs): super(SwitchOrderLayer, self).__init__( name, 'switch_order', 0, inputs=inputs, **xargs) - self.config.reshape_conf.heightAxis.extend(reshape['height']) - self.config.reshape_conf.widthAxis.extend(reshape['width']) + self.config.reshape_conf.height_axis.extend(reshape['height']) + self.config.reshape_conf.width_axis.extend(reshape['width']) # Deprecated, use a new layer specific class instead diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index dc68c213da..c103edf237 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -6460,6 +6460,7 @@ def switch_order_layer(input, return LayerOutput( name=name, layer_type=LayerType.SWITCH_ORDER_LAYER, + activation=act, parents=input, size=l.config.size) From c7b347887dd6285dcb171499c17d705d424924ad Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 11:46:04 +0800 Subject: [PATCH 14/69] Fix variable names and comments --- paddle/operators/pad_op.cc | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 6ea2a25f0b..894fe2cecf 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -25,16 +25,16 @@ class PadOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto dim0 = ctx.Input("X")->dims(); - auto paddings = GetAttr>("paddings"); - PADDLE_ENFORCE_EQ(dim0.size(), (int)(paddings.size() / 2), + auto x_dim = ctx.Input("X")->dims(); + auto paddings = Attr>("paddings"); + PADDLE_ENFORCE_EQ(x_dim.size() * 2, int(paddings.size()), "Size of paddings should be equal to 2 * dimension size " "of input tensor."); - std::vector dim1(dim0.size()); - for (int i = 0; i < dim0.size(); ++i) { - dim1[i] = dim0[i] + paddings[i * 2] + paddings[i * 2 + 1]; + std::vector out_dims(x_dim.size()); + for (int i = 0; i < x_dim.size(); ++i) { + out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } - ctx.Output("Out")->Resize(paddle::framework::make_ddim(dim1)); + ctx.Output("Out")->Resize(framework::make_ddim(out_dims)); } }; @@ -42,8 +42,12 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: PadOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of pad op."); - AddOutput("Out", "The 
output of pad op."); + AddInput("X", + "The input of pad op. " + "The input should be a k-D tensor(k > 0 and k < 7)"); + AddOutput("Out", + "The output of pad op." + "A tensor with the same shape as X."); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: @@ -75,7 +79,7 @@ Out = [[0, 1, 2, 0, 0] " 2 * dimension size of input tensor."); AddAttr("pad_value", "(float) default to 0; " - "The value to be padded into tensor. ") + "The value to fill padded areas.") .SetDefault(0.0f); } }; From d0dbc0610fd41d10ebb5abc133b25976e53484db Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 11 Sep 2017 12:10:01 +0800 Subject: [PATCH 15/69] Correctly use host_vector in LoDTensor and expose LoDTensor to Python. --- paddle/framework/CMakeLists.txt | 1 + paddle/framework/lod_tensor.h | 17 ++++- paddle/framework/lod_tensor_test.cu | 52 +++++++++++++++ paddle/operators/math/im2col_test.cc | 4 +- paddle/pybind/pybind.cc | 43 ++++++++++--- .../paddle/v2/framework/tests/test_tensor.py | 63 ++++++++++++++++++- 6 files changed, 164 insertions(+), 16 deletions(-) create mode 100644 paddle/framework/lod_tensor_test.cu diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index c0838d9b75..3371962c63 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -9,6 +9,7 @@ cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor) +nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) cc_test(variable_test SRCS variable_test.cc) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 154068fef6..bbddd6de9d 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -18,8 +18,10 @@ #ifndef PADDLE_ONLY_CPU #include #include +#include #endif +#include #include "paddle/framework/ddim.h" #include "paddle/framework/tensor.h" #include "paddle/platform/enforce.h" @@ -32,7 +34,8 @@ template using Vector = std::vector; #else template -using Vector = thrust::host_vector; +using Vector = thrust::host_vector< + T, thrust::system::cuda::experimental::pinned_allocator>; #endif using LoD = std::vector>; @@ -53,7 +56,17 @@ class LoDTensor { LoDTensor() {} LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {} - void set_lod(const LoD& lod) { lod_ = lod; } + void set_lod(const LoD& lod) { + lod_ = lod; + LOG(INFO) << lod_[0][0]; + } + +#ifdef PADDLE_ONLY_CPU + void set_lod(const std::vector>& lod) { + lod_ = lod; + LOG(INFO) << lod_[0][0]; + } +#endif void set_tensor(Tensor* tensor) { tensor_ = tensor; } diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu new file mode 100644 index 0000000000..1079a36a2e --- /dev/null +++ b/paddle/framework/lod_tensor_test.cu @@ -0,0 +1,52 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include +#include +#include "paddle/framework/lod_tensor.h" +#include "paddle/platform/assert.h" + +#include + +__global__ void test(size_t* a, int size) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; + i += blockDim.x * gridDim.x) { + a[i] *= 2; + } +} + +TEST(LoDTensor, LoDInGPU) { + paddle::framework::Tensor tensor; + paddle::framework::LoDTensor lod_tensor; + paddle::platform::GPUPlace place(0); + + paddle::framework::LoD src_lod; + src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14}); + + tensor.Resize({14, 16}); + tensor.mutable_data(place); + + lod_tensor.set_lod(src_lod); + lod_tensor.set_tensor(&tensor); + CHECK_EQ(lod_tensor.lod_element(0, 2), 4); + CHECK_EQ(lod_tensor.lod_element(0, 4), 8); + + auto lod = lod_tensor.lod(); + + test<<<1, 8>>>(lod[0].data(), lod[0].size()); + cudaDeviceSynchronize(); + + for (size_t i = 0; i < src_lod[0].size(); ++i) { + CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); + } +} diff --git a/paddle/operators/math/im2col_test.cc b/paddle/operators/math/im2col_test.cc index ee5fb98acd..f905600bb3 100644 --- a/paddle/operators/math/im2col_test.cc +++ b/paddle/operators/math/im2col_test.cc @@ -71,8 +71,10 @@ void testIm2col() { context = new paddle::platform::CPUDeviceContext(paddle::platform::CPUPlace()); } else { +#ifndef PADDLE_ONLY_CPU context = new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace()); +#endif } im2col(input, output_cfo, stride, stride, padding, padding, context); im2col_ocf(input, output_ocf, stride, stride, padding, padding, context); @@ -115,4 +117,4 @@ TEST(math, im2col) { #ifndef PADDLE_ONLY_CPU testIm2col(); #endif -} \ No newline at end of file +} diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 30189d538b..73fb7186ae 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -56,7 +56,8 @@ namespace paddle { namespace framework { using Tensor = framework::Tensor; -using LODTensor = framework::LODTensor; +using LoDTensor = framework::LoDTensor; +using LoD = framework::LoD; static size_t UniqueIntegerGenerator() { static std::atomic generator; @@ -116,23 +117,45 @@ PYBIND11_PLUGIN(core) { return self.data()[offset]; }); - py::class_(m, "LODTensor", R"DOC(LOD(Leval of Ddetails) Tensor. + py::class_(m, "LoDTensor", R"DOC(LoD(Leval of Ddetails) Tensor. -The tensor and LOD info should be created before creating the LODTensor, then +The tensor and LoD info should be created before creating the LoDTensor, then call the set_tensor and set_lod functions to set them. 
)DOC") .def("set_tensor", - [](LODTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) + [](LoDTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) .def("set_lod", - [](LODTensor &self, std::vector> &lod) { + [](LoDTensor &self, std::vector> &lod) { +#ifdef PADDLE_ONLY_CPU self.set_lod(lod); +#else + paddle::framework::LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + self.set_lod(new_lod); +#endif }) - .def("get_tensor", - [](LODTensor &self) -> Tensor & { return self.tensor(); }, + .def("tensor", + [](LoDTensor &self) -> Tensor & { return self.tensor(); }, py::return_value_policy::reference) - .def("get_lod", [](LODTensor &self) -> std::vector> { + .def("lod", [](LoDTensor &self) -> std::vector> { +#ifdef PADDLE_ONLY_CPU return self.lod(); +#else + auto lod = self.lod(); + std::vector> new_lod; + new_lod.reserve(lod.size()); + std::transform(lod.begin(), lod.end(), std::back_inserter(new_lod), + [](paddle::framework::Vector item) -> + std::vector { + std::vector v; + v.reserve(item.size()); + std::copy(item.begin(), item.end(), std::back_inserter(v)); + return v; + }); + return new_lod; +#endif }); py::class_(m, "Variable", R"DOC(Variable Class. @@ -147,8 +170,8 @@ All parameter, weight, gradient are variables in Paddle. [](Variable &self) -> Tensor * { return self.GetMutable(); }, py::return_value_policy::reference) .def("get_lod_tensor", - [](Variable &self) -> LODTensor * { - return self.GetMutable(); + [](Variable &self) -> LoDTensor * { + return self.GetMutable(); }, py::return_value_policy::reference) .def("get_net", diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index 1af39818a3..1bfe1370e2 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -3,7 +3,7 @@ import unittest import numpy -class TestScope(unittest.TestCase): +class TestTensor(unittest.TestCase): def test_int_tensor(self): scope = core.Scope() var = scope.new_var("test_tensor") @@ -20,8 +20,8 @@ class TestScope(unittest.TestCase): tensor.set(tensor_array, place) tensor_array_2 = numpy.array(tensor) - self.assertEqual(1.0, tensor_array_2[3, 9]) - self.assertEqual(2.0, tensor_array_2[19, 11]) + self.assertEqual(1, tensor_array_2[3, 9]) + self.assertEqual(2, tensor_array_2[19, 11]) def test_float_tensor(self): scope = core.Scope() @@ -43,6 +43,63 @@ class TestScope(unittest.TestCase): self.assertAlmostEqual(1.0, tensor_array_2[3, 9]) self.assertAlmostEqual(2.0, tensor_array_2[19, 11]) + def test_int_lod_tensor(self): + scope = core.Scope() + var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") + place = core.CPUPlace() + + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() + + tensor.set_dims([4, 4, 6]) + tensor.alloc_int(place) + array = numpy.array(tensor) + array[0, 0, 0] = 3 + array[3, 3, 5] = 10 + tensor.set(array, place) + + lod_tensor.set_tensor(tensor) + lod_tensor.set_lod([[0, 2, 4]]) + + lod_v = numpy.array(lod_tensor.tensor()) + self.assertTrue(numpy.alltrue(array == lod_v)) + + lod = lod_tensor.lod() + self.assertEqual(0, lod[0][0]) + self.assertEqual(2, lod[0][1]) + self.assertEqual(4, lod[0][2]) + + def test_float_lod_tensor(self): + scope = core.Scope() + var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") + place = core.CPUPlace() + + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() + + tensor.set_dims([5, 2, 3, 4]) + 
tensor.alloc_float(place) + + tensor_array = numpy.array(tensor) + self.assertEqual((5, 2, 3, 4), tensor_array.shape) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + tensor.set(tensor_array, place) + + lod_tensor.set_tensor(tensor) + + lod_v = numpy.array(lod_tensor.tensor()) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertEqual(len(lod_tensor.lod()), 0) + + lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_tensor.set_lod(lod_py) + lod = lod_tensor.lod() + self.assertListEqual(lod_py, lod) + if __name__ == '__main__': unittest.main() From e75aab3a392a7dd692c0aafa36e12fbf864f1134 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 11 Sep 2017 12:57:00 +0800 Subject: [PATCH 16/69] Remove redundant code in lod_tensor.h --- paddle/framework/lod_tensor.h | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index bbddd6de9d..568f4e8981 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -56,17 +56,7 @@ class LoDTensor { LoDTensor() {} LoDTensor(const LoD& lod, Tensor* t) : lod_(lod), tensor_(t) {} - void set_lod(const LoD& lod) { - lod_ = lod; - LOG(INFO) << lod_[0][0]; - } - -#ifdef PADDLE_ONLY_CPU - void set_lod(const std::vector>& lod) { - lod_ = lod; - LOG(INFO) << lod_[0][0]; - } -#endif + void set_lod(const LoD& lod) { lod_ = lod; } void set_tensor(Tensor* tensor) { tensor_ = tensor; } From d74fe780402747baf6bd5564b8584bf06e9fb099 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 13:56:07 +0800 Subject: [PATCH 17/69] refine MKLDNNMatrix, solid data handle, rename updateData to setData --- paddle/gserver/layers/MKLDNNLayer.h | 4 ++-- paddle/math/MKLDNNMatrix.cpp | 10 ++++----- paddle/math/MKLDNNMatrix.h | 35 +++++++++++++++++++++++------ 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index b983b833d5..ed1ad7c0bd 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -203,7 +203,7 @@ protected: real* iData = getInputValue(0, CPU_DEVICE)->getData(); // update input data // since it might be changed if this is after data layer - inVal_->updateData(iData); + inVal_->setData(iData); } /** @@ -216,7 +216,7 @@ protected: // update diff real* oDiff = getOutput(CPU_DEVICE).grad->getData(); - outGrad_->updateData(oDiff); + outGrad_->setData(oDiff); } /** diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/math/MKLDNNMatrix.cpp index 0a355e2644..c4063e5069 100644 --- a/paddle/math/MKLDNNMatrix.cpp +++ b/paddle/math/MKLDNNMatrix.cpp @@ -33,14 +33,12 @@ MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, memory::primitive_desc pd) { size_t width = cnts / dims[0]; m = Matrix::create(height, width, false, false); } - CHECK(m) << " Matrix should not be empty"; + CpuMatrixPtr cpuMatrix = std::dynamic_pointer_cast(m); CHECK(cpuMatrix) << "Only support create from CPU matrix yet"; - - CHECK_EQ(cnts, m->getElementCnt()) << "Count size does not match"; - return std::make_shared( - m->getData(), m->getHeight(), m->getWidth(), pd); + CHECK_EQ(cpuMatrix->getElementCnt(), cnts) << "Count size does not match"; + return std::make_shared(cpuMatrix, pd); } MKLDNNMatrixPtr MKLDNNMatrix::create(MatrixPtr m, @@ -138,7 +136,7 @@ void MKLDNNMatrix::downSpatial() { mkldnn_primitive_create(&result, pd.get(), nullptr, nullptr), "could not create a memory primitive"); reset(result); - 
set_data_handle(getData()); + set_data_handle(data_); } } // namespace paddle diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/math/MKLDNNMatrix.h index e50f698b49..eef3b429e6 100644 --- a/paddle/math/MKLDNNMatrix.h +++ b/paddle/math/MKLDNNMatrix.h @@ -30,11 +30,10 @@ typedef std::shared_ptr MKLDNNMatrixPtr; */ class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory { public: - MKLDNNMatrix(real* data, - size_t height, - size_t width, - mkldnn::memory::primitive_desc pd) - : CpuMatrix(data, height, width, false), mkldnn::memory(pd, data) {} + MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd) + : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false), + mkldnn::memory(pd, m->getData()), + m_(m) {} ~MKLDNNMatrix() {} @@ -81,11 +80,29 @@ public: void downSpatial(); /** - * Update the memory data handle. + * set the memory data handle. * Caution: This will not check the buffer size of the data, * it should be coverd by user. */ - void updateData(void* data) { set_data_handle(data); } + void setData(real* data) { + set_data_handle(data); + CpuMatrix::setData(data); + m_.reset(); + } + + /** + * override Matrix::getData + * check data before return + */ + real* getData() override { + CHECK_EQ((void*)data_, get_data_handle()); + return data_; + } + + const real* getData() const override { + CHECK_EQ((void*)data_, get_data_handle()); + return data_; + } /** * Get primitive descriptor. @@ -143,6 +160,10 @@ protected: memory::format srcFmt, memory::format dstFmt, memory::dims dm); + +private: + // save the CpuMatrixPtr in case the buffer released outside + CpuMatrixPtr m_; }; } // namespace paddle From d4c0734840420314298e4a330ddd4f10d957e8e7 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 14:08:13 +0800 Subject: [PATCH 18/69] remove convertOutputToOtherDevice --- paddle/gserver/layers/MKLDNNFcLayer.cpp | 23 ++++------------------- paddle/gserver/layers/MKLDNNFcLayer.h | 2 -- paddle/gserver/layers/MKLDNNLayer.h | 13 +++++++------ 3 files changed, 11 insertions(+), 27 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index 8318c8c519..f4deb351f2 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -77,24 +77,6 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); } -void MKLDNNFcLayer::convertOutputToOtherDevice() { - copyOutputInfoToOtherDevice(); - // find other cpu device and reorder output to cpu device - int cnt = 0; - for (size_t i = 0; i < outputOtherDevice_.size(); i++) { - if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { - // fc cpu output value do not need convert - // just share point - outputOtherDevice_[i].value = output_.value; - ++cnt; - } - } - - if (cnt > 1) { - LOG(WARNING) << "should not have more than one CPU devie"; - } -} - void MKLDNNFcLayer::reshape() { const Argument& input = getInput(0, getPrev(0)->getDeviceId()); int batchSize = input.getBatchSize(); @@ -155,7 +137,10 @@ void MKLDNNFcLayer::resetFwd() { // change original output value to mkldnn output value output_.value = std::dynamic_pointer_cast(outVal_); if (!outputIsOnlyMKLDNN()) { - convertOutputToOtherDevice(); + copyOutputInfoToOtherDevice(); + // fc cpu output value do not need create convert + // just share point + getOutput(CPU_DEVICE).value->setData(output_.value->getData()); } // create forward handle diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index 
e138a6faf1..e2657a8d5e 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -72,8 +72,6 @@ protected: * only would be called when needed */ void resetBwd(); - - void convertOutputToOtherDevice() override; }; } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index ed1ad7c0bd..1a3e949fb9 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -113,12 +113,6 @@ public: */ virtual void convertWeightsToPaddle() {} - /** - * convert MKLDNN output to other device. - * only support CPU device yet - */ - virtual void convertOutputToOtherDevice() {} - /** * print info about sizes */ @@ -155,6 +149,7 @@ protected: * copy base info and do not copy data value */ void copyOutputInfoToOtherDevice() { + int cnt = 0; for (size_t i = 0; i < outputOtherDevice_.size(); i++) { outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight()); outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth()); @@ -163,6 +158,12 @@ protected: outputOtherDevice_[i].subSequenceStartPositions = output_.subSequenceStartPositions; outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims; + if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { + ++cnt; + } + } + if (cnt > 1) { + LOG(WARNING) << "should not have more than one CPU devie"; } } From f40d5f580de3731e071bb9cca3c98a6537955e25 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 14:19:10 +0800 Subject: [PATCH 19/69] remove syncOutputGrad, rename syncInputValue to updateInputData --- paddle/gserver/layers/MKLDNNFcLayer.cpp | 18 +++++++++----- paddle/gserver/layers/MKLDNNFcLayer.h | 2 ++ paddle/gserver/layers/MKLDNNLayer.h | 32 +++++-------------------- 3 files changed, 20 insertions(+), 32 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index f4deb351f2..53433cef35 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -220,13 +220,12 @@ void MKLDNNFcLayer::resetBwd() { pipelineBwd_.push_back(*bwdWgt_); /// backward data - device = inputIsOnlyMKLDNN() ? 
MKLDNN_DEVICE : CPU_DEVICE; - const MatrixPtr& in = getInputGrad(0, device); + const MatrixPtr& in = inputLayers_[0]->getOutput().grad; if (in == nullptr) { return; } - if (getInput(0, device).getAllCount() > 1) { - // TODO(TJ): use outputMaps_ ways when merge outgrad done + if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) { + // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done } else { inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc()); } @@ -243,13 +242,21 @@ void MKLDNNFcLayer::resetBwd() { pipelineBwd_.push_back(*bwdData_); } +void MKLDNNFcLayer::updateInputData() { + if (inputLayers_[0]->getType() != "data") { + return; + } + real* iData = getInputValue(0, CPU_DEVICE)->getData(); + inVal_->setData(iData); +} + void MKLDNNFcLayer::forward(PassType passType) { Layer::forward(passType); reshape(); { REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); - syncInputValue(); + updateInputData(); // just submit forward pipeline stream_->submit(pipelineFwd_); @@ -271,7 +278,6 @@ void MKLDNNFcLayer::backward(const UpdateCallback& callback) { REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); resetBwd(); - syncOutputGrad(); // just sumbmit backward pipeline stream_->submit(pipelineBwd_); } diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index e2657a8d5e..4ad67a16e0 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -53,6 +53,8 @@ public: void backward(const UpdateCallback& callback) override; + void updateInputData() override; + protected: /** * reshape the input image sizes diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index 1a3e949fb9..543364edce 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -113,6 +113,12 @@ public: */ virtual void convertWeightsToPaddle() {} + /** + * Update input value data when input layer is "data" type. + * Since the input value data address might be changed. + */ + virtual void updateInputData() {} + /** * print info about sizes */ @@ -194,32 +200,6 @@ protected: return outputOtherDevice_.size() == 0; } - /** - * Sync input value data - */ - void syncInputValue() { - if (inputIsOnlyMKLDNN()) { - return; - } - real* iData = getInputValue(0, CPU_DEVICE)->getData(); - // update input data - // since it might be changed if this is after data layer - inVal_->setData(iData); - } - - /** - * Sync output grad data - */ - void syncOutputGrad() { - if (outputIsOnlyMKLDNN()) { - return; - } - - // update diff - real* oDiff = getOutput(CPU_DEVICE).grad->getData(); - outGrad_->setData(oDiff); - } - /** * Set deviceId of this layer. 
*/ From f31217fc2e535d0d1079a02895214c2c2f434809 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 14:50:54 +0800 Subject: [PATCH 20/69] Fix issues --- paddle/operators/pad_op.cc | 5 +++-- paddle/operators/pad_op.h | 21 +++++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 894fe2cecf..ef678cf3d3 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -27,10 +27,10 @@ class PadOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto x_dim = ctx.Input("X")->dims(); auto paddings = Attr>("paddings"); - PADDLE_ENFORCE_EQ(x_dim.size() * 2, int(paddings.size()), + PADDLE_ENFORCE_EQ(x_dim.size() * 2, int64_t(paddings.size()), "Size of paddings should be equal to 2 * dimension size " "of input tensor."); - std::vector out_dims(x_dim.size()); + std::vector out_dims(x_dim.size()); for (int i = 0; i < x_dim.size(); ++i) { out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1]; } @@ -95,6 +95,7 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); + PADDLE_ENFORCE_NOT_NULL(x_grad, "Output(X@GRAD) should not be null"); x_grad->Resize(x_dims); } diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index dcf957b47e..53451f925a 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -28,18 +28,17 @@ using EigenTensor = framework::EigenTensor; template void PadFunction(const framework::ExecutionContext& context) { - auto pads = context.GetAttr>("paddings"); + auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < paddings.size(); ++i) { paddings[i].first = pads[i * 2]; paddings[i].second = pads[i * 2 + 1]; } - T pad_value = context.GetAttr("pad_value"); + T pad_value = context.Attr("pad_value"); auto* x = context.Input("X"); auto* out = context.Output("Out"); out->mutable_data(context.GetPlace()); - auto dims = x->dims(); auto x_tensor = EigenTensor::From(*x); auto out_tensor = EigenTensor::From(*out); @@ -51,8 +50,8 @@ template class PadKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - int dim = context.Input("X")->dims().size(); - switch (dim) { + int rank = context.Input("X")->dims().size(); + switch (rank) { case 1: PadFunction(context); break; @@ -72,14 +71,15 @@ class PadKernel : public framework::OpKernel { PadFunction(context); break; default: - PADDLE_THROW("Only ranks up to 6 supported."); + PADDLE_THROW( + "PadOp only support tensors with no more than 6 dimensions."); } } }; template void PadGradFunction(const framework::ExecutionContext& context) { - auto pads = context.GetAttr>("paddings"); + auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; for (int i = 0; i < paddings.size(); ++i) { paddings[i].first = -pads[i * 2]; @@ -99,9 +99,9 @@ template class PadGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - size_t dim = + size_t rank = context.Input(framework::GradVarName("Out"))->dims().size(); - switch (dim) { + switch (rank) { case 1: PadGradFunction(context); break; @@ -121,7 +121,8 @@ class PadGradKernel : public framework::OpKernel { PadGradFunction(context); break; default: - PADDLE_THROW("Only ranks up to 6 supported."); + 
PADDLE_THROW( + "PadOp only support tensors with no more than 6 dimensions."); } } }; From dd64349a9213b419c6a50c81e06e2d6a8fa9ebd5 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 00:06:06 -0700 Subject: [PATCH 21/69] refine reshape operator --- paddle/operators/reshape_op.cc | 15 +++++++++------ paddle/operators/reshape_op.h | 10 ++++------ .../paddle/v2/framework/tests/test_reshape_op.py | 16 ++++++++++++++-- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index d75ec76632..37cbecbf25 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -29,14 +29,17 @@ class ReshapeOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto *in = ctx.Input("X"); auto shape = ctx.Attr>("shape"); - PADDLE_ENFORCE_EQ((unsigned)shape.size(), in->dims().size(), - "The dimension of Input(X) mismatches with Attr(shape)."); - size_t shape_size = 1; + int64_t capacity = -1; for (auto dim : shape) { - shape_size *= dim; + PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive."); + if (capacity < 0) { + capacity = dim; + } else { + capacity *= dim; + } } - size_t in_size = framework::product(in->dims()); - PADDLE_ENFORCE_EQ(shape_size, in_size, + int64_t in_size = framework::product(in->dims()); + PADDLE_ENFORCE_EQ(capacity, in_size, "The size of Input(X) mismatches with Attr(shape)."); ctx.Output("Out")->Resize(in->dims()); } diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 61d502c836..0e920329d9 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -21,14 +21,12 @@ namespace paddle { namespace operators { -using Tensor = framework::Tensor; - template class ReshapeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* out = ctx.Output("Out"); - auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); + auto* in = ctx.Input("X"); out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); @@ -46,8 +44,8 @@ template class ReshapeGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* d_out = ctx.Input(framework::GradVarName("Out")); - auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_out = ctx.Input(framework::GradVarName("Out")); + auto* d_x = ctx.Output(framework::GradVarName("X")); d_x->mutable_data(ctx.GetPlace()); auto in_dims = d_x->dims(); diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index 4797019435..df7d913ba4 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -10,15 +10,27 @@ class TestReshapeOp(unittest.TestCase): def setUp(self): self.type = "reshape" self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } - self.attrs = {'shape': [51, 37]} + self.attrs = {'shape': [51 * 37]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} -class ReshapeGradOpTest(GradientChecker): +class TestReshapeGradOp(GradientChecker): + """ def test_normal(self): op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) inputs = {"X": np.random.random((10, 20)).astype("float32")} self.check_grad(op, inputs, set("X"), "Out") + """ + + def setUp(self): + self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) + self.inputs = {"X": 
np.random.random((10, 20)).astype("float32")} + + def test_normal(self): + self.check_grad(self.op, self.inputs, ["X"], "Out") + + def test_dev_compare(self): + self.compare_grad(self.op, self.inputs) if __name__ == '__main__': From 9c929a495980643672f66c882e76ca67e761954f Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 11 Sep 2017 15:19:19 +0800 Subject: [PATCH 22/69] Fix warning log --- paddle/operators/pad_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index 53451f925a..ca8832f26a 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -30,7 +30,7 @@ template void PadFunction(const framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < paddings.size(); ++i) { + for (size_t i = 0; i < paddings.size(); ++i) { paddings[i].first = pads[i * 2]; paddings[i].second = pads[i * 2 + 1]; } @@ -81,7 +81,7 @@ template void PadGradFunction(const framework::ExecutionContext& context) { auto pads = context.Attr>("paddings"); Eigen::array, D> paddings; - for (int i = 0; i < paddings.size(); ++i) { + for (size_t i = 0; i < paddings.size(); ++i) { paddings[i].first = -pads[i * 2]; paddings[i].second = -pads[i * 2 + 1]; } From 7bd517129ae50850979c7a7c6cc8fce22e2131a8 Mon Sep 17 00:00:00 2001 From: Yancey Date: Mon, 11 Sep 2017 15:21:14 +0800 Subject: [PATCH 23/69] Add Concat operator with CPU kernel (#3775) add concat op with CPU kernel --- paddle/operators/concat_op.cc | 79 +++++++++++++++++++ paddle/operators/concat_op.cu | 19 +++++ paddle/operators/concat_op.h | 64 +++++++++++++++ paddle/pybind/pybind.cc | 1 + python/paddle/v2/framework/op.py | 1 - .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../v2/framework/tests/gradient_checker.py | 5 +- .../paddle/v2/framework/tests/op_test_util.py | 32 +++++--- .../v2/framework/tests/test_concat_op.py | 22 ++++++ 9 files changed, 211 insertions(+), 13 deletions(-) create mode 100644 paddle/operators/concat_op.cc create mode 100644 paddle/operators/concat_op.cu create mode 100644 paddle/operators/concat_op.h create mode 100644 python/paddle/v2/framework/tests/test_concat_op.py diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc new file mode 100644 index 0000000000..0ebefbab26 --- /dev/null +++ b/paddle/operators/concat_op.cc @@ -0,0 +1,79 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/operators/concat_op.h" +#include + +namespace paddle { +namespace operators { +using framework::Tensor; + +class ConcatOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + auto ins = ctx.MultiInput("X"); + auto *out = ctx.Output("Out"); + size_t axis = static_cast(ctx.Attr("axis")); + size_t n = ins.size(); + + PADDLE_ENFORCE_GT(n, 1, "Input tensors count should > 1."); + + auto out_dims = ins[0]->dims(); + size_t in_zero_dims_size = out_dims.size(); + for (size_t i = 1; i < n; i++) { + for (size_t j = 0; j < in_zero_dims_size; j++) { + if (j == axis) { + out_dims[axis] += ins[i]->dims()[j]; + continue; + } + PADDLE_ENFORCE_EQ(out_dims[j], ins[i]->dims()[j], + "Input tensors should have the same " + "elements except the specify axis.") + } + } + out->Resize(out_dims); + } +}; + +class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ConcatOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "the input tensors of concat operator.").AsDuplicable(); + AddOutput("Out", "the output tensor of concat operator."); + AddComment(R"DOC( + Join the input tensors along with the axis. + Examples: + Input[0] = [[1,2],[3,4]] + Input[1] = [[5,6]] + axis = 0 + Output = [[1,2], + [3,4], + [5,6]] + )DOC"); + AddAttr("axis", "The axis which the inputs will be joined with.") + .SetDefault(0); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(concat, ops::ConcatOp, ops::ConcatOpMaker) +REGISTER_OP_CPU_KERNEL(concat, + ops::ConcatKernel) diff --git a/paddle/operators/concat_op.cu b/paddle/operators/concat_op.cu new file mode 100644 index 0000000000..38fee7473d --- /dev/null +++ b/paddle/operators/concat_op.cu @@ -0,0 +1,19 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/concat_op.h" + +namespace ops = paddle::operators; +// TODO(Yancey1989) Add GPU kernel diff --git a/paddle/operators/concat_op.h b/paddle/operators/concat_op.h new file mode 100644 index 0000000000..f977054fdf --- /dev/null +++ b/paddle/operators/concat_op.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include "paddle/framework/op_registry.h" + +namespace paddle { +namespace operators { + +template +class ConcatKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto ins = ctx.MultiInput("X"); + auto* out = ctx.Output("Out"); + int64_t axis = static_cast(ctx.Attr("axis")); + size_t n = ins.size(); + size_t output_axis_dim = 0; + size_t before = 1, after = 1; + for (size_t i = 0; i < n; i++) { + output_axis_dim += ins[i]->dims()[axis]; + } + auto& input_zero = ins[0]; + for (int64_t i = 0; i < input_zero->dims().size(); i++) { + if (i == axis) { + continue; + } + if (i < axis) { + before *= input_zero->dims()[i]; + } else { + after *= input_zero->dims()[i]; + } + } + size_t output_offset = 0; + for (size_t i = 0; i < n; i++) { + auto& in = ins[i]; + auto axis_dim = in->dims()[axis]; + for (size_t j = 0; j < before; j++) { + size_t len = axis_dim * after * sizeof(T); + const T* src = in->data() + axis_dim * after * j; + T* out_data = out->mutable_data(platform::CPUPlace()); + T* dest = out_data + output_offset + output_axis_dim * after * j; + memcpy(dest, src, len); + } + output_offset += axis_dim * after; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index db701a2a30..227b75aff8 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -49,6 +49,7 @@ USE_OP(minus); USE_OP(cos_sim); USE_CPU_ONLY_OP(gather); USE_CPU_ONLY_OP(scatter); +USE_CPU_ONLY_OP(concat); USE_OP(top_k); USE_OP(squared_l2_distance); USE_OP(sum); diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 4e91924a50..9e665adad2 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -43,7 +43,6 @@ class OpDescCreationMethod(object): if len(args) != 0: raise ValueError("Only keyword arguments are supported.") op_desc = framework_pb2.OpDesc() - for input_parameter in self.__op_proto__.inputs: input_arguments = kwargs.get(input_parameter.name, []) if is_str(input_arguments): diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 2117fdf0d5..2f6be105b6 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -35,4 +35,5 @@ py_test(test_lookup_table SRCS test_lookup_table.py) py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py) py_test(test_sum_op SRCS test_sum_op.py) py_test(mnist SRCS mnist.py) +py_test(test_concat_op SRCS test_concat_op.py) py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index fdb06b7988..51a98284bd 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -11,11 +11,10 @@ __all__ = ['get_numeric_gradient'] def create_op(op_type): # TODO need to set attrs kwargs = dict() - for in_name in Operator.get_op_input_names(op_type): + for in_name, _ in Operator.get_op_input_names(op_type): kwargs[in_name] = in_name - for out_name in Operator.get_op_output_names(op_type): + for out_name, _ in Operator.get_op_output_names(op_type): kwargs[out_name] = out_name - return Operator(op_type, **kwargs) diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index 
370f27eaf6..54fe5da440 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -27,17 +27,30 @@ class OpTestMeta(type): places.append(core.GPUPlace(0)) for place in places: - for in_name in Operator.get_op_input_names(self.type): - if hasattr(self, "inputs") and in_name in self.inputs: - kwargs[in_name] = in_name - var = scope.new_var(in_name).get_tensor() - arr = self.inputs[in_name] - var.set_dims(arr.shape) - var.set(arr, place) + for ins in Operator.get_op_input_names(self.type): + in_name = ins[0] + in_dup = ins[1] + if hasattr(self, 'inputs') and in_name in self.inputs: + kwargs[in_name] = [] + if in_dup: + arrays = self.inputs[in_name] + for index, arr in enumerate(arrays): + var = scope.new_var(in_name + str(index)) + tensor = var.get_tensor() + tensor.set_dims(arr.shape) + tensor.set(arr, place) + kwargs[in_name].append(in_name + str(index)) + else: + kwargs[in_name] = in_name + var = scope.new_var(in_name).get_tensor() + arr = self.inputs[in_name] + var.set_dims(arr.shape) + var.set(arr, place) else: kwargs[in_name] = "@EMPTY@" - for out_name in Operator.get_op_output_names(self.type): + for out_name, out_dup in Operator.get_op_output_names( + self.type): if not hasattr(self, "outputs"): raise ValueError( "The test op must set self.outputs dict.") @@ -60,7 +73,8 @@ class OpTestMeta(type): ctx = core.DeviceContext.create(place) op.run(scope, ctx) - for out_name in Operator.get_op_output_names(self.type): + for out_name, out_dup in Operator.get_op_output_names( + self.type): actual = numpy.array(scope.find_var(out_name).get_tensor()) expect = self.outputs[out_name] self.assertTrue( diff --git a/python/paddle/v2/framework/tests/test_concat_op.py b/python/paddle/v2/framework/tests/test_concat_op.py new file mode 100644 index 0000000000..6bd4c30974 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_concat_op.py @@ -0,0 +1,22 @@ +import unittest +import numpy as np +from gradient_checker import GradientChecker, create_op +from op_test_util import OpTestMeta + + +class TestConcatOp(unittest.TestCase): + __metaclass__ = OpTestMeta + + def setUp(self): + self.type = "concat" + x0 = np.random.random((2, 3, 2, 5)).astype('float32') + x1 = np.random.random((2, 3, 3, 5)).astype('float32') + x2 = np.random.random((2, 3, 4, 5)).astype('float32') + axis = 2 + self.inputs = {'X': [x0, x1, x2]} + self.attrs = {'axis': axis} + self.outputs = {'Out': np.concatenate((x0, x1, x2), axis=axis)} + + +if __name__ == '__main__': + unittest.main() From 2d807f2b4c586656b760e31030a08655f7d298b1 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 11 Sep 2017 15:31:33 +0800 Subject: [PATCH 24/69] init refine op python tests --- .../paddle/v2/framework/tests/CMakeLists.txt | 2 - python/paddle/v2/framework/tests/op_test.py | 28 ++++-- .../v2/framework/tests/test_add_two_op.py | 23 ++--- .../v2/framework/tests/test_cos_sim_op.py | 49 +++------- .../framework/tests/test_cross_entropy_op.py | 2 +- .../tests/test_fill_zeros_like_op.py | 19 ++-- .../v2/framework/tests/test_gather_op.py | 32 +++--- .../tests/test_gaussian_random_op.py | 8 +- .../framework/tests/test_gradient_checker.py | 42 ++++---- .../v2/framework/tests/test_lookup_table.py | 29 ++---- .../paddle/v2/framework/tests/test_mean_op.py | 24 ++--- .../v2/framework/tests/test_minus_op.py | 23 ++--- .../paddle/v2/framework/tests/test_mul_op.py | 98 +++++-------------- python/paddle/v2/framework/tests/test_net.py | 2 +- .../v2/framework/tests/test_rowwise_add_op.py | 73 ++++++-------- 
.../tests/test_scale_and_identity_op.py | 41 +++----- .../v2/framework/tests/test_scatter_op.py | 37 +++---- .../paddle/v2/framework/tests/test_sgd_op.py | 17 ++-- .../v2/framework/tests/test_softmax_op.py | 28 ++---- .../tests/test_squared_l2_distance_op.py | 76 ++++++-------- .../paddle/v2/framework/tests/test_sum_op.py | 12 +-- .../v2/framework/tests/test_top_k_op.py | 17 ++-- .../framework/tests/test_uniform_random_op.py | 8 +- 23 files changed, 267 insertions(+), 423 deletions(-) diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 2117fdf0d5..07997e201a 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -19,8 +19,6 @@ py_test(test_scatter_op SRCS test_scatter_op.py) py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) py_test(test_top_k_op SRCS test_top_k_op.py) -py_test(gradient_checker SRCS gradient_checker.py) - py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 3a6a5dca4c..fe094df8e5 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -9,7 +9,7 @@ def grad_var_name(var_name): return var_name + "@GRAD" -def create_op(scope, op_type, inputs, outputs, attrs=None): +def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() for in_name, in_dup in Operator.get_op_inputs(op_type): @@ -29,15 +29,16 @@ def create_op(scope, op_type, inputs, outputs, attrs=None): kwargs[out_name] = [] if out_dup: sub_in = outputs[out_name] - for sun_in_name in sub_in: - var = scope.new_var(sun_in_name) - kwargs[out_name].append(sun_in_name) + for sub_in_name in sub_in: + var = scope.new_var(sub_in_name) + kwargs[out_name].append(sub_in_name) else: var = scope.new_var(out_name) kwargs[out_name].append(out_name) for attr_name in Operator.get_op_attr_names(op_type): - kwargs[attr_name] = attrs[attr_name] + if attr_name in attrs: + kwargs[attr_name] = attrs[attr_name] return Operator(op_type, **kwargs) @@ -89,6 +90,7 @@ def get_numeric_gradient(scope, delta=0.005, in_place=False): + print "before set input" set_input(scope, op, inputs, core.CPUPlace()) op.infer_shape(scope) @@ -110,7 +112,7 @@ def get_numeric_gradient(scope, # we use a for loop to compute the gradient of every element. for i in xrange(tensor_size): if in_place: - set_input(op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, core.CPUPlace()) # get one input element throw it's index i. 
origin = tensor_to_check.get_float_element(i) @@ -120,7 +122,7 @@ def get_numeric_gradient(scope, y_pos = get_output() if in_place: - set_input(op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, core.CPUPlace()) x_neg = origin - delta tensor_to_check.set_float_element(i, x_neg) @@ -168,7 +170,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place, class OpTest(unittest.TestCase): def check_output_with_place(self, place): self.scope = core.Scope() - self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs) + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_outputs = self.outputs if hasattr(self, "outputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + op_attrs) if isinstance(place, core.GPUPlace) and not self.op.support_gpu(): return set_input(self.scope, self.op, self.inputs, place) @@ -227,7 +233,11 @@ class OpTest(unittest.TestCase): in_place=False, max_relative_error=0.005): self.scope = core.Scope() - self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs) + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_outputs = self.outputs if hasattr(self, "outputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + op_attrs) if no_grad_set is None: no_grad_set = set() diff --git a/python/paddle/v2/framework/tests/test_add_two_op.py b/python/paddle/v2/framework/tests/test_add_two_op.py index a578e74eca..3ca34d9b9f 100644 --- a/python/paddle/v2/framework/tests/test_add_two_op.py +++ b/python/paddle/v2/framework/tests/test_add_two_op.py @@ -1,23 +1,20 @@ import unittest +import numpy as np +from op_test import OpTest -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator - -from op_test_util import OpTestMeta - - -class TestAddOp(unittest.TestCase): - __metaclass__ = OpTestMeta +class TestAddOp(OpTest): def setUp(self): - self.type = "add" + self.op_type = "add" self.inputs = { - 'X': numpy.random.random((102, 105)).astype("float32"), - 'Y': numpy.random.random((102, 105)).astype("float32") + 'X': np.random.random((102, 105)).astype("float32"), + 'Y': np.random.random((102, 105)).astype("float32") } self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']} + def test_check_output(self): + self.check_output() + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_cos_sim_op.py b/python/paddle/v2/framework/tests/test_cos_sim_op.py index 32013a7999..797cbd8cc5 100644 --- a/python/paddle/v2/framework/tests/test_cos_sim_op.py +++ b/python/paddle/v2/framework/tests/test_cos_sim_op.py @@ -1,17 +1,14 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class TestCosSimOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestCosSimOp(OpTest): def setUp(self): - self.type = "cos_sim" + self.op_type = "cos_sim" self.inputs = { - 'X': np.random.random((32, 64)).astype("float32"), - 'Y': np.random.random((32, 64)).astype("float32") + 'X': np.random.random((10, 5)).astype("float32"), + 'Y': np.random.random((10, 5)).astype("float32") } expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1) expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1) @@ -23,38 +20,20 @@ class 
TestCosSimOp(unittest.TestCase): 'Out': np.expand_dims(expect_out, 1) } + def test_check_output(self): + self.check_output() -class TestCosSimGradOp(GradientChecker): - def setUp(self): - self.op = create_op("cos_sim") - self.inputs = { - 'X': np.random.random((10, 5)).astype("float32"), - 'Y': np.random.random((10, 5)).astype("float32") - } - - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - def test_normal(self): - self.check_grad( - self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.05) + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05) - def test_ignore_x(self): + def test_check_grad_ingore_x(self): self.check_grad( - self.op, - self.inputs, ["Y"], - "Out", - max_relative_error=0.05, - no_grad_set={"X"}) + ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X')) - def test_ignore_y(self): + def test_check_grad_ignore_y(self): self.check_grad( - self.op, - self.inputs, ["X"], - "Out", - max_relative_error=0.05, - no_grad_set={"Y"}) + ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_cross_entropy_op.py b/python/paddle/v2/framework/tests/test_cross_entropy_op.py index fb6a440e23..c2fc102a8b 100644 --- a/python/paddle/v2/framework/tests/test_cross_entropy_op.py +++ b/python/paddle/v2/framework/tests/test_cross_entropy_op.py @@ -21,7 +21,7 @@ class TestCrossEntropy(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["X"], "Y") + self.check_grad(['X'], 'Y') if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py index e5c862605f..2473daaba2 100644 --- a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py +++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py @@ -1,16 +1,17 @@ import unittest -from op_test_util import OpTestMeta -import numpy +import numpy as np +from op_test import OpTest -class TestFillZerosLikeOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestFillZerosLikeOp(OpTest): def setUp(self): - self.type = "fill_zeros_like" - self.inputs = {'Src': numpy.random.random((219, 232)).astype("float32")} - self.outputs = {'Dst': numpy.zeros_like(self.inputs['Src'])} + self.op_type = "fill_zeros_like" + self.inputs = {'Src': np.random.random((219, 232)).astype("float32")} + self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])} + + def test_check_output(self): + self.check_output() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_gather_op.py b/python/paddle/v2/framework/tests/test_gather_op.py index e3de3fd0a1..b0ab429ef1 100644 --- a/python/paddle/v2/framework/tests/test_gather_op.py +++ b/python/paddle/v2/framework/tests/test_gather_op.py @@ -1,30 +1,20 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op -import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import numpy as np +from op_test import OpTest -class TestGatherOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestGatherOp(OpTest): def setUp(self): - self.type = "gather" - xnp = numpy.random.random((10, 20)).astype("float32") - self.inputs = { - 'X': xnp, - 'Index': numpy.array([1, 3, 5]).astype("int32") - } - self.outputs = {'Out': 
self.inputs['X'][self.inputs['Index']]} + self.op_type = "gather" + xnp = np.random.random((10, 20)).astype("float32") + self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")} + self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} + def test_check_output(self): + self.check_output() -class TestGatherGradOp(GradientChecker): - def test_gather_grad(self): - op = create_op("gather") - xnp = numpy.random.random((10, 20)).astype("float32") - inputs = {'X': xnp, 'Index': numpy.array([1, 3, 5]).astype("int32")} - self.check_grad(op, inputs, set("X"), "Out") + def test_check_grad(self): + self.check_grad(['X'], 'Out') if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_gaussian_random_op.py b/python/paddle/v2/framework/tests/test_gaussian_random_op.py index f95ed70b58..1f9e4db783 100644 --- a/python/paddle/v2/framework/tests/test_gaussian_random_op.py +++ b/python/paddle/v2/framework/tests/test_gaussian_random_op.py @@ -14,11 +14,11 @@ class GaussianRandomTest(unittest.TestCase): def gaussian_random_test(self, place): scope = core.Scope() - scope.new_var("Out").get_tensor() + scope.new_var('Out').get_tensor() op = Operator( "gaussian_random", - Out="Out", + Out='Out', dims=[1000, 784], mean=.0, std=1., @@ -27,10 +27,10 @@ class GaussianRandomTest(unittest.TestCase): op.infer_shape(scope) context = core.DeviceContext.create(place) op.run(scope, context) - tensor = numpy.array(scope.find_var("Out").get_tensor()) + tensor = numpy.array(scope.find_var('Out').get_tensor()) self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py index e8a7f848df..abeb01cb34 100644 --- a/python/paddle/v2/framework/tests/test_gradient_checker.py +++ b/python/paddle/v2/framework/tests/test_gradient_checker.py @@ -1,42 +1,44 @@ import unittest -import numpy -from paddle.v2.framework.op import Operator -from gradient_checker import GradientChecker -from gradient_checker import get_numeric_gradient +import numpy as np +import paddle.v2.framework.core as core +from op_test import get_numeric_gradient +from op_test import create_op class GetNumericGradientTest(unittest.TestCase): def test_add_op(self): - add_op = Operator("add", X="X", Y="Y", Out="Z") - x = numpy.random.random((10, 1)).astype("float32") - y = numpy.random.random((10, 1)).astype("float32") - - arr = get_numeric_gradient(add_op, {"X": x, "Y": y}, "Z", "X") + x = np.random.random((10, 1)).astype("float32") + y = np.random.random((10, 1)).astype("float32") + z = x + y + scope = core.Scope() + add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict()) + arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out') self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) def test_softmax_op(self): def stable_softmax(x): """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - numpy.max(x) - exps = numpy.exp(shiftx) - return exps / numpy.sum(exps) + shiftx = x - np.max(x) + exps = np.exp(shiftx) + return exps / np.sum(exps) def label_softmax_grad(Y, dY): dX = Y * 0.0 for i in range(Y.shape[0]): - d = numpy.dot(Y[i, :], dY[i, :]) + d = np.dot(Y[i, :], dY[i, :]) dX[i, :] = Y[i, :] * (dY[i, :] - d) return dX - softmax_op = Operator("softmax", X="X", Y="Y") - - X = numpy.random.random((2, 2)).astype("float32") - Y = 
numpy.apply_along_axis(stable_softmax, 1, X) - dY = numpy.ones(Y.shape) + X = np.random.random((2, 2)).astype("float32") + Y = np.apply_along_axis(stable_softmax, 1, X) + dY = np.ones(Y.shape) dX = label_softmax_grad(Y, dY) - arr = get_numeric_gradient(softmax_op, {"X": X}, "Y", "X") - numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2) + scope = core.Scope() + softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict()) + + arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y") + np.testing.assert_almost_equal(arr, dX, decimal=1e-2) if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_lookup_table.py b/python/paddle/v2/framework/tests/test_lookup_table.py index 4b7ce92c0f..b259bb67e8 100644 --- a/python/paddle/v2/framework/tests/test_lookup_table.py +++ b/python/paddle/v2/framework/tests/test_lookup_table.py @@ -1,31 +1,22 @@ import unittest import numpy as np -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestLookupTableOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestLookupTableOp(OpTest): def setUp(self): - self.type = 'lookup_table' - table = np.random.random((17, 31)).astype('float32') - ids = np.random.randint(0, 17, 4).astype('int32') + self.op_type = "lookup_table" + table = np.random.random((17, 31)).astype("float32") + ids = np.random.randint(0, 17, 4).astype("int32") self.inputs = {'W': table, 'Ids': ids} self.outputs = {'Out': table[ids]} + def test_check_output(self): + self.check_output() -class TestLookupTableGradOp(GradientChecker): - def test_grad(self): - op = create_op('lookup_table') - table = np.random.random((17, 31)).astype('float32') - ids = np.random.randint(0, 17, 4).astype('int32') - inputs = {'W': table, 'Ids': ids} - # comapre gradients - self.compare_grad(op, inputs, set(['Ids'])) - # check gradients - self.check_grad(op, inputs, set('W'), 'Out') + def test_check_grad(self): + self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/framework/tests/test_mean_op.py index f32b3160d6..7823abd8f8 100644 --- a/python/paddle/v2/framework/tests/test_mean_op.py +++ b/python/paddle/v2/framework/tests/test_mean_op.py @@ -1,24 +1,20 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op import numpy as np +from op_test import OpTest -class TestMeanOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestMeanOp(OpTest): def setUp(self): - self.type = "mean" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} - self.outputs = {'Out': np.mean(self.inputs['X'])} + self.op_type = "mean" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} + self.outputs = {'Out': np.mean(self.inputs["X"])} + def test_check_output(self): + self.check_output() -class MeanGradOpTest(GradientChecker): - def test_normal(self): - op = create_op("mean") - inputs = {"X": np.random.random((10, 10)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") + def test_checkout_grad(self): + self.check_grad(['X'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_minus_op.py b/python/paddle/v2/framework/tests/test_minus_op.py index 5abdd4a69b..dea797a1fe 100644 --- a/python/paddle/v2/framework/tests/test_minus_op.py +++ 
b/python/paddle/v2/framework/tests/test_minus_op.py @@ -1,30 +1,23 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class MinusOpTest(unittest.TestCase): - __metaclass__ = OpTestMeta - +class MinusOpTest(OpTest): def setUp(self): - self.type = "minus" + self.op_type = "minus" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((32, 84)).astype("float32") } self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])} + def test_check_output(self): + self.check_output() -class MinusGradTest(GradientChecker): - def test_left(self): - op = create_op("minus") - inputs = { - "X": np.random.random((10, 10)).astype("float32"), - "Y": np.random.random((10, 10)).astype("float32") - } - self.check_grad(op, inputs, ["X", 'Y'], "Out") + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_mul_op.py b/python/paddle/v2/framework/tests/test_mul_op.py index 8c827e242e..b3d95a56b8 100644 --- a/python/paddle/v2/framework/tests/test_mul_op.py +++ b/python/paddle/v2/framework/tests/test_mul_op.py @@ -1,27 +1,35 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta -from paddle.v2.framework.op import Operator +from op_test import OpTest -class TestMulOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestMulOp(OpTest): def setUp(self): - self.type = "mul" + self.op_type = "mul" self.inputs = { 'X': np.random.random((32, 84)).astype("float32"), 'Y': np.random.random((84, 100)).astype("float32") } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) -class TestMulOp2(unittest.TestCase): - __metaclass__ = OpTestMeta + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) + + +class TestMulOp2(OpTest): def setUp(self): - self.type = "mul" + self.op_type = "mul" self.inputs = { 'X': np.random.random((15, 4, 12, 10)).astype("float32"), 'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32") @@ -32,72 +40,20 @@ class TestMulOp2(unittest.TestCase): self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9)) } + def test_check_output(self): + self.check_output() -class TestMulGradOp(GradientChecker): - def setUp(self): - self.op = create_op("mul") - self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") - } - - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - def test_normal(self): - # mul op will enlarge the relative error - self.check_grad( - self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5) - - def test_ignore_x(self): - self.check_grad( - self.op, - self.inputs, ["Y"], - "Out", - max_relative_error=0.5, - no_grad_set={"X"}) - - def test_ignore_y(self): - self.check_grad( - self.op, - self.inputs, ["X"], - "Out", - max_relative_error=0.5, - no_grad_set={"Y"}) - - -class TestMulGradTest2(GradientChecker): - def setUp(self): - self.op = Operator( - "mul", X="X", Y="Y", Out="Out", x_num_col_dims=2, y_num_col_dims=2) - 
self.inputs = { - "X": np.random.random((15, 4, 12, 10)).astype("float32"), - "Y": np.random.random((4, 30, 8, 2, 9)).astype("float32") - } - - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - def test_normal(self): - self.check_grad( - self.op, self.inputs, ["X", "Y"], "Out", max_relative_error=0.5) + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - def test_ignore_x(self): + def test_check_grad_ingore_x(self): self.check_grad( - self.op, - self.inputs, ["Y"], - "Out", - max_relative_error=0.5, - no_grad_set={"X"}) + ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X')) - def test_ignore_y(self): + def test_check_grad_ignore_y(self): self.check_grad( - self.op, - self.inputs, ["X"], - "Out", - max_relative_error=0.5, - no_grad_set={"Y"}) + ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_net.py b/python/paddle/v2/framework/tests/test_net.py index e4b7cd480c..50cfb855f2 100644 --- a/python/paddle/v2/framework/tests/test_net.py +++ b/python/paddle/v2/framework/tests/test_net.py @@ -35,5 +35,5 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]} self.assertEqual(expected, "\n" + str(net)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_rowwise_add_op.py b/python/paddle/v2/framework/tests/test_rowwise_add_op.py index 8378c1cd21..336645bd99 100644 --- a/python/paddle/v2/framework/tests/test_rowwise_add_op.py +++ b/python/paddle/v2/framework/tests/test_rowwise_add_op.py @@ -1,68 +1,51 @@ import unittest import numpy as np -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op +from op_test import OpTest -class TestRowwiseAddOp(unittest.TestCase): - __metaclass__ = OpTestMeta - - def setUp(self): - self.type = "rowwise_add" - self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'b': np.random.random(84).astype("float32") - } - self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} - - -class TestRowwiseAddOp2(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestRowwiseAddOp(OpTest): def setUp(self): - self.type = "rowwise_add" + self.op_type = "rowwise_add" self.inputs = { - 'X': np.random.random((13, 6, 7, 8)).astype("float32"), - 'b': np.random.random((7, 8)).astype("float32") + 'X': np.random.uniform(0.1, 1, [5, 10]).astype("float32"), + 'b': np.random.uniform(0.1, 1, [10]).astype("float32") } self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} + def test_check_output(self): + self.check_output() -class TestRowwiseAddGradOp(GradientChecker): - def setUp(self): - self.op = create_op("rowwise_add") - self.inputs = { - "X": np.random.uniform(0.1, 1, [5, 10]).astype("float32"), - "b": np.random.uniform(0.1, 1, [10]).astype("float32") - } + def test_check_grad_normal(self): + self.check_grad(['X', 'b'], 'Out') - def test_normal(self): - self.check_grad(self.op, self.inputs, ["X", "b"], "Out") + def test_check_grad_ingore_b(self): + self.check_grad(['X'], 'Out', no_grad_set=set('b')) - def test_ignore_b(self): - self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"}) + def test_check_grad_ingore_x(self): + self.check_grad(['b'], 'Out', no_grad_set=set('X')) - def test_ignore_x(self): - self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"}) - -class 
TestRowwiseAddGradOp2(GradientChecker): +class TestRowwiseAddOp2(OpTest): def setUp(self): - self.op = create_op("rowwise_add") + self.op_type = "rowwise_add" self.inputs = { - "X": np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"), - "b": np.random.uniform(0.1, 1, [2, 5]).astype("float32") + 'X': np.random.uniform(0.1, 1, [2, 3, 2, 5]).astype("float32"), + 'b': np.random.uniform(0.1, 1, [2, 5]).astype("float32") } + self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['b'])} + + def test_check_output(self): + self.check_output() - def test_normal(self): - self.check_grad(self.op, self.inputs, ["X", "b"], "Out") + def test_check_grad_normal(self): + self.check_grad(['X', 'b'], 'Out') - def test_ignore_b(self): - self.check_grad(self.op, self.inputs, ["X"], "Out", no_grad_set={"b"}) + def test_check_grad_ignore_b(self): + self.check_grad(['X'], 'Out', no_grad_set=set('b')) - def test_ignore_x(self): - self.check_grad(self.op, self.inputs, ["b"], "Out", no_grad_set={"X"}) + def test_check_grad_ignore_x(self): + self.check_grad(['b'], 'Out', no_grad_set=set('X')) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py index 69b301c376..05d76d4282 100644 --- a/python/paddle/v2/framework/tests/test_scale_and_identity_op.py +++ b/python/paddle/v2/framework/tests/test_scale_and_identity_op.py @@ -1,43 +1,34 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op import numpy as np -from paddle.v2.framework.op import Operator +from op_test import OpTest -class IdentityTest(unittest.TestCase): - __metaclass__ = OpTestMeta - +class IdentityTest(OpTest): def setUp(self): - self.type = "identity" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} + self.op_type = "identity" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} self.outputs = {'Out': self.inputs['X']} + def test_check_output(self): + self.check_output() -class IdentityGradOpTest(GradientChecker): - def test_normal(self): - op = create_op("identity") - inputs = {"X": np.random.random((10, 10)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") - + def test_check_grad(self): + self.check_grad(['X'], 'Out') -class ScaleTest(unittest.TestCase): - __metaclass__ = OpTestMeta +class ScaleTest(OpTest): def setUp(self): - self.type = "scale" - self.inputs = {'X': np.random.random((32, 784)).astype("float32")} + self.op_type = "scale" + self.inputs = {'X': np.random.random((10, 10)).astype("float32")} self.attrs = {'scale': -2.3} self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']} + def test_check_output(self): + self.check_output() -class ScaleGradTest(GradientChecker): - def test_normal(self): - op = Operator("scale", X="X", Out="Out", scale=3.2) - self.check_grad(op, - {"X": np.random.random((10, 10)).astype("float32")}, - set("X"), "Out") + def test_check_grad(self): + self.check_grad(['X'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_scatter_op.py b/python/paddle/v2/framework/tests/test_scatter_op.py index c1f9444889..33c73c5263 100644 --- a/python/paddle/v2/framework/tests/test_scatter_op.py +++ b/python/paddle/v2/framework/tests/test_scatter_op.py @@ -1,37 +1,24 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op 
-import numpy -import paddle.v2.framework.core as core -from paddle.v2.framework.op import Operator +import numpy as np +from op_test import OpTest -class TestScatterOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestScatterOp(OpTest): def setUp(self): - self.type = "scatter" - ref_np = numpy.ones((3, 3)).astype("float32") - index_np = numpy.array([1, 2]).astype("int32") - updates_np = numpy.random.random((2, 3)).astype("float32") - output_np = numpy.copy(ref_np) + self.op_type = "scatter" + ref_np = np.ones((3, 3)).astype("float32") + index_np = np.array([1, 2]).astype("int32") + updates_np = np.random.random((2, 3)).astype("float32") + output_np = np.copy(ref_np) output_np[index_np] += updates_np self.inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np} self.outputs = {'Out': output_np} + def test_check_output(self): + self.check_output() -class TestScatterGradOp(GradientChecker): - def test_scatter_grad(self): - op = create_op("scatter") - # test data setup - ref_np = numpy.ones((3, 10)).astype("float32") - index_np = numpy.array([1, 2]).astype("int32") - updates_np = numpy.random.random((2, 10)).astype("float32") - output_np = numpy.copy(ref_np) - output_np[index_np] += updates_np - inputs = {'Ref': ref_np, 'Index': index_np, 'Updates': updates_np} - self.check_grad( - op, inputs, set(["Updates", "Ref"]), "Out", in_place=True) + def test_check_grad(self): + self.check_grad(['Updates', 'Ref'], 'Out', in_place=True) if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_sgd_op.py b/python/paddle/v2/framework/tests/test_sgd_op.py index e5f9ef865e..557cf15ace 100644 --- a/python/paddle/v2/framework/tests/test_sgd_op.py +++ b/python/paddle/v2/framework/tests/test_sgd_op.py @@ -1,21 +1,22 @@ import unittest -import numpy -from op_test_util import OpTestMeta +import numpy as np +from op_test import OpTest -class TestSGD(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSGD(OpTest): def setUp(self): - self.type = "sgd" - w = numpy.random.random((102, 105)).astype("float32") - g = numpy.random.random((102, 105)).astype("float32") + self.op_type = "sgd" + w = np.random.random((102, 105)).astype("float32") + g = np.random.random((102, 105)).astype("float32") lr = 0.1 self.inputs = {'param': w, 'grad': g} self.attrs = {'learning_rate': lr} self.outputs = {'param_out': w - lr * g} + def test_check_output(self): + self.check_output() + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py index 0d590fa706..1c5802dfd5 100644 --- a/python/paddle/v2/framework/tests/test_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_op.py @@ -1,9 +1,6 @@ import unittest - import numpy as np - -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest def stable_softmax(x): @@ -13,26 +10,21 @@ def stable_softmax(x): return exps / np.sum(exps) -class TestSoftmaxOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSoftmaxOp(OpTest): def setUp(self): self.type = "softmax" - self.inputs = {"X": np.random.random((10, 10)).astype("float32")} + self.inputs = { + 'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32") + } self.outputs = { - "Y": np.apply_along_axis(stable_softmax, 1, self.inputs["X"]) + 'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X']) } + def test_check_output(self): + self.check_output() -class TestSoftmaxGradOp(GradientChecker): - 
def setUp(self): - self.op = create_op("softmax") - self.inputs = { - "X": np.random.uniform(0.1, 1, [10, 10]).astype("float32") - } - - def test_softmax_grad(self): - self.check_grad(self.op, self.inputs, ["X"], "Y") + def test_check_grad(self): + self.check_grad(['X'], 'Y') if __name__ == "__main__": diff --git a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py index 2bcdf37df4..dc6ebf5d30 100644 --- a/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py +++ b/python/paddle/v2/framework/tests/test_squared_l2_distance_op.py @@ -1,17 +1,14 @@ import unittest -from op_test_util import OpTestMeta -from gradient_checker import GradientChecker, create_op import numpy as np +from op_test import OpTest -class TestSquaredL2DistanceOp_f0(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestSquaredL2DistanceOp_f0(OpTest): def setUp(self): - self.type = 'squared_l2_distance' + self.op_type = "squared_l2_distance" self.inputs = { - 'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'), - 'Y': np.random.uniform(0.1, 1., (32, 64)).astype('float32') + 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res @@ -20,15 +17,19 @@ class TestSquaredL2DistanceOp_f0(unittest.TestCase): 'Out': np.expand_dims(output.sum(1), 1) } + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') -class TestSquaredL2DistanceOp_f1(unittest.TestCase): - __metaclass__ = OpTestMeta +class TestSquaredL2DistanceOp_f1(OpTest): def setUp(self): - self.type = 'squared_l2_distance' + self.op_type = "squared_l2_distance" self.inputs = { - 'X': np.random.uniform(0.1, 1., (32, 64)).astype('float32'), - 'Y': np.random.uniform(0.1, 1., (1, 64)).astype('float32') + 'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (1, 3)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] output = sub_res * sub_res @@ -37,53 +38,34 @@ class TestSquaredL2DistanceOp_f1(unittest.TestCase): 'Out': np.expand_dims(output.sum(1), 1) } + def test_check_output(self): + self.check_output() -class TestSquaredL2DistanceOp_f2(unittest.TestCase): - __metaclass__ = OpTestMeta + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') + +class TestSquaredL2DistanceOp_f2(OpTest): def setUp(self): - self.type = 'squared_l2_distance' + self.op_type = "squared_l2_distance" self.inputs = { - 'X': np.random.uniform(0.1, 1., (32, 64, 128)).astype('float32'), - 'Y': np.random.uniform(0.1, 1., (1, 64, 128)).astype('float32') + 'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"), + 'Y': np.random.uniform(0.1, 0.6, (1, 3, 4)).astype("float32") } sub_res = self.inputs['X'] - self.inputs['Y'] - sub_res = sub_res.reshape((32, 64 * 128)) + sub_res = sub_res.reshape((2, 3 * 4)) output = sub_res * sub_res self.outputs = { 'sub_result': sub_res, 'Out': np.expand_dims(output.sum(1), 1) } + def test_check_output(self): + self.check_output() -class TestSquaredL2DistanceGradOp(GradientChecker): - def test_squared_l2_distance_b0(self): - op = create_op("squared_l2_distance") - inputs = { - 'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'), - 'Y': np.random.uniform(0.1, .6, (2, 3)).astype('float32') - } - self.compare_grad(op, inputs) - self.check_grad(op, inputs, set(["X", "Y"]), "Out") - - def 
test_squared_l2_distance_b1(self): - op = create_op("squared_l2_distance") - inputs = { - 'X': np.random.uniform(0.1, .6, (2, 3)).astype('float32'), - 'Y': np.random.uniform(0.1, .6, (1, 3)).astype('float32') - } - self.compare_grad(op, inputs) - self.check_grad(op, inputs, set(["X", "Y"]), "Out") - - def test_squared_l2_distance_b2(self): - op = create_op("squared_l2_distance") - inputs = { - 'X': np.random.uniform(0.1, .6, (2, 3, 4)).astype('float32'), - 'Y': np.random.uniform(0.1, .6, (1, 3, 4)).astype('float32') - } - self.compare_grad(op, inputs) - self.check_grad(op, inputs, set(["X", "Y"]), "Out") + def test_check_grad(self): + self.check_grad(['X', 'Y'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_sum_op.py b/python/paddle/v2/framework/tests/test_sum_op.py index 66417d70e8..f8eb34c076 100644 --- a/python/paddle/v2/framework/tests/test_sum_op.py +++ b/python/paddle/v2/framework/tests/test_sum_op.py @@ -6,10 +6,10 @@ from op_test import OpTest class TestSumOp(OpTest): def setUp(self): self.op_type = "sum" - x0 = np.random.random((3, 4)).astype('float32') - x1 = np.random.random((3, 4)).astype('float32') - x2 = np.random.random((3, 4)).astype('float32') - self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}} + x0 = np.random.random((3, 4)).astype("float32") + x1 = np.random.random((3, 4)).astype("float32") + x2 = np.random.random((3, 4)).astype("float32") + self.inputs = {'X': {'x0': x0, 'x1': x1, 'x2': x2}} y = x0 + x1 + x2 self.outputs = {'Out': y} @@ -17,8 +17,8 @@ class TestSumOp(OpTest): self.check_output() def test_check_grad(self): - self.check_grad(["x0"], "Out") + self.check_grad(['x0'], 'Out') -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_top_k_op.py b/python/paddle/v2/framework/tests/test_top_k_op.py index e841d96d26..cab799256d 100644 --- a/python/paddle/v2/framework/tests/test_top_k_op.py +++ b/python/paddle/v2/framework/tests/test_top_k_op.py @@ -1,14 +1,11 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class TestTopkOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestTopkOp(OpTest): def setUp(self): - self.type = "top_k" + self.op_type = "top_k" k = 1 input = np.random.random((32, 84)).astype("float32") output = np.ndarray((32, k)) @@ -25,11 +22,9 @@ class TestTopkOp(unittest.TestCase): self.outputs = {'Out': output, 'Indices': indices} -class TestTopkOp3d(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestTopkOp3d(OpTest): def setUp(self): - self.type = "top_k" + self.op_type = "top_k" k = 1 input = np.random.random((32, 2, 84)).astype("float32") input_flat_2d = input.reshape(64, 84) @@ -48,5 +43,5 @@ class TestTopkOp3d(unittest.TestCase): self.outputs = {'Out': output, 'Indices': indices} -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/python/paddle/v2/framework/tests/test_uniform_random_op.py b/python/paddle/v2/framework/tests/test_uniform_random_op.py index c3d2bb44da..76a5e36e56 100644 --- a/python/paddle/v2/framework/tests/test_uniform_random_op.py +++ b/python/paddle/v2/framework/tests/test_uniform_random_op.py @@ -14,11 +14,11 @@ class UniformRandomTest(unittest.TestCase): def uniform_random_test(self, place): scope = core.Scope() - scope.new_var("X").get_tensor() + scope.new_var('X').get_tensor() op = Operator( "uniform_random", - 
Out="X", + Out='X', dims=[1000, 784], min=-5.0, max=10.0, @@ -27,9 +27,9 @@ class UniformRandomTest(unittest.TestCase): op.infer_shape(scope) ctx = core.DeviceContext.create(place) op.run(scope, ctx) - tensor = numpy.array(scope.find_var("X").get_tensor()) + tensor = numpy.array(scope.find_var('X').get_tensor()) self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() From 7ae72f752d1dcf3a818b9e9a3bef001fa8344b8e Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 01:09:20 -0700 Subject: [PATCH 25/69] remove unused code in test --- python/paddle/v2/framework/tests/test_reshape_op.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index df7d913ba4..50653f58ee 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -15,13 +15,6 @@ class TestReshapeOp(unittest.TestCase): class TestReshapeGradOp(GradientChecker): - """ - def test_normal(self): - op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) - inputs = {"X": np.random.random((10, 20)).astype("float32")} - self.check_grad(op, inputs, set("X"), "Out") - """ - def setUp(self): self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) self.inputs = {"X": np.random.random((10, 20)).astype("float32")} From 94ea8ee0e5e9f079cffb87f756d3274f522066e9 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 16:23:20 +0800 Subject: [PATCH 26/69] refine MKLDNNLayer logical: move forward and backward to MKLDNNLayer and remove copyOutputInfoToOtherDevice --- paddle/gserver/layers/MKLDNNFcLayer.cpp | 81 ++-------- paddle/gserver/layers/MKLDNNFcLayer.h | 32 +--- paddle/gserver/layers/MKLDNNLayer.h | 190 ++++++++++++++++++++---- 3 files changed, 178 insertions(+), 125 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index 53433cef35..a47967b3d3 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -14,7 +14,6 @@ limitations under the License. 
*/ #include "MKLDNNFcLayer.h" #include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" using namespace mkldnn; // NOLINT typedef memory::format format; @@ -40,6 +39,8 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap, oc_ = getSize(); oh_ = 1; ow_ = 1; + ih_ = 1; + iw_ = 1; // input size can not change in FC iLayerSize_ = inputLayers_[0]->getSize(); @@ -78,36 +79,17 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { } void MKLDNNFcLayer::reshape() { - const Argument& input = getInput(0, getPrev(0)->getDeviceId()); - int batchSize = input.getBatchSize(); - if (bs_ == batchSize) { - return; - } - bs_ = batchSize; - ih_ = input.getFrameHeight(); - iw_ = input.getFrameWidth(); - if (ih_ == 0) { - ih_ = 1; - } - if (iw_ == 0) { - iw_ = 1; - } + reshapeInput(); + CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize()); ic_ = iLayerSize_ / (ih_ * iw_); CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible"; CHECK_EQ(size_t(oc_), getSize()); - printSizeInfo(); - // reset output - output_.setFrameHeight(oh_); - output_.setFrameWidth(ow_); - resetOutput(bs_, oc_); + reshapeOutput(oh_, ow_); + resizeOutput(bs_, oc_); - // reset mkldnn forward - resetFwd(); - needResetBwd_ = true; - - convertWeightsFromPaddle(); + printSizeInfo(); } void MKLDNNFcLayer::resetFwd() { @@ -137,7 +119,6 @@ void MKLDNNFcLayer::resetFwd() { // change original output value to mkldnn output value output_.value = std::dynamic_pointer_cast(outVal_); if (!outputIsOnlyMKLDNN()) { - copyOutputInfoToOtherDevice(); // fc cpu output value do not need create convert // just share point getOutput(CPU_DEVICE).value->setData(output_.value->getData()); @@ -243,51 +224,13 @@ void MKLDNNFcLayer::resetBwd() { } void MKLDNNFcLayer::updateInputData() { - if (inputLayers_[0]->getType() != "data") { - return; - } - real* iData = getInputValue(0, CPU_DEVICE)->getData(); - inVal_->setData(iData); -} - -void MKLDNNFcLayer::forward(PassType passType) { - Layer::forward(passType); - reshape(); - - { - REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); - updateInputData(); - - // just submit forward pipeline - stream_->submit(pipelineFwd_); - } - - /* activation */ { - REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); - forwardActivation(); - } + inVal_->setData(getInputValue(0, CPU_DEVICE)->getData()); } -void MKLDNNFcLayer::backward(const UpdateCallback& callback) { - /* Do derivation */ { - REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); - backwardActivation(); - } - - { - REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); - resetBwd(); - - // just sumbmit backward pipeline - stream_->submit(pipelineBwd_); - } - - { - REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); - weight_->getParameterPtr()->incUpdate(callback); - if (biases_ && biases_->getWGrad()) { - biases_->getParameterPtr()->incUpdate(callback); - } +void MKLDNNFcLayer::updateWeights(const UpdateCallback& callback) { + weight_->getParameterPtr()->incUpdate(callback); + if (biases_ && biases_->getWGrad()) { + biases_->getParameterPtr()->incUpdate(callback); } } } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index 4ad67a16e0..add8ac9991 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -45,35 +45,19 @@ public: bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; - void convertWeightsFromPaddle() override; - - void convertWeightsToPaddle() override; + void reshape() override; - void forward(PassType 
passType) override; + void resetFwd() override; - void backward(const UpdateCallback& callback) override; + void resetBwd() override; void updateInputData() override; -protected: - /** - * reshape the input image sizes - * and reset output buffer size - * and reset mkldnn forward - */ - void reshape(); - - /** - * reset the forward primitve and memory - * only would be called when input size changes - */ - void resetFwd(); - - /** - * reset the backward primitve and memory for mkldnn fc - * only would be called when needed - */ - void resetBwd(); + void updateWeights(const UpdateCallback& callback) override; + + void convertWeightsFromPaddle() override; + + void convertWeightsToPaddle() override; }; } // namespace paddle diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index 543364edce..c10f2fec2f 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -19,6 +19,7 @@ limitations under the License. */ #include "MKLDNNBase.h" #include "mkldnn.hpp" #include "paddle/math/MKLDNNMatrix.h" +#include "paddle/utils/Stat.h" DECLARE_bool(use_mkldnn); @@ -33,6 +34,8 @@ typedef std::shared_ptr MKLDNNLayerPtr; */ class MKLDNNLayer : public Layer { protected: + // input value element count + size_t inputElemenCnt_; // batch size int bs_; // input image channel, height and width @@ -52,7 +55,7 @@ protected: std::vector pipelineFwd_; std::vector pipelineBwd_; - // MKLDNNMatrixPtr + // MKLDNNMatrixPtr with internal format MKLDNNMatrixPtr inVal_; MKLDNNMatrixPtr inGrad_; MKLDNNMatrixPtr outVal_; @@ -65,6 +68,7 @@ protected: public: explicit MKLDNNLayer(const LayerConfig& config) : Layer(config), + inputElemenCnt_(0), bs_(0), ic_(0), ih_(0), @@ -95,12 +99,93 @@ public: if (!Layer::init(layerMap, parameterMap)) { return false; } + checkCPUOutputsNumber(); stream_.reset(new MKLDNNStream()); engine_ = CPUEngine::Instance().getEngine(); return true; } + void forward(PassType passType) override { + passType_ = passType; + + { + REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); + copySeqInfoToOutputs(); + CHECK(!inputLayers_.empty()); + size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt(); + if (inputElemenCnt_ != elemenCnt) { + inputElemenCnt_ = elemenCnt; + reshape(); + resetFwd(); + convertWeightsFromPaddle(); + needResetBwd_ = true; + } + + if (inputLayers_[0]->getType() == "data") { + updateInputData(); + } + + stream_->submit(pipelineFwd_); + } + + /* activation */ { + REGISTER_TIMER_INFO("FwActTimer", getName().c_str()); + forwardActivation(); + } + } + + void backward(const UpdateCallback& callback) override { + /* Do derivation */ { + REGISTER_TIMER_INFO("BpActTimer", getName().c_str()); + backwardActivation(); + } + + { + REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); + if (needResetBwd_) { + resetBwd(); + needResetBwd_ = false; + } + + stream_->submit(pipelineBwd_); + } + + { + REGISTER_TIMER_INFO("WeightUpdate", getName().c_str()); + updateWeights(callback); + } + } + + /** + * reshape the input image sizes + * and reset output image and buffer size + */ + virtual void reshape() = 0; + + /** + * reset the mkldnn forward primitve and memory + * only would be called when input size changes + */ + virtual void resetFwd() = 0; + + /** + * reset the mkldnn backward primitve and memory for mkldnn fc + * only would be called when needed + */ + virtual void resetBwd() = 0; + + /** + * Update input value data when input layer is "data" type. + * Since the input value data address might be changed. 
+ */ + virtual void updateInputData() {} + + /** + * Update weights and biases if necessary. + */ + virtual void updateWeights(const UpdateCallback& callback) {} + /** * convert weight from paddle format to mkldnn format * weight_ will be override @@ -114,10 +199,38 @@ public: virtual void convertWeightsToPaddle() {} /** - * Update input value data when input layer is "data" type. - * Since the input value data address might be changed. + * add this interface as public for unit test */ - virtual void updateInputData() {} + void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); } + +protected: + /** + * reshape the input image sizes and input batchsize + */ + virtual void reshapeInput() { + const Argument& input = inputLayers_[0]->getOutput(); + bs_ = input.getBatchSize(); + int height = input.getFrameHeight(); + int width = input.getFrameWidth(); + if (height != 0) { + ih_ = height; + } + if (width != 0) { + iw_ = width; + } + } + + /** + * reshape output image sizes + */ + virtual void reshapeOutput(size_t height, size_t width) { + output_.setFrameHeight(height); + output_.setFrameWidth(width); + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].setFrameHeight(height); + outputOtherDevice_[i].setFrameWidth(width); + } + } /** * print info about sizes @@ -133,8 +246,8 @@ public: */ virtual void printValueFormatFlow() { if (inVal_ && outVal_) { - VLOG(MKLDNN_FMTS) << "value format flow --- " << inVal_->getFormat() - << " >>> " << outVal_->getFormat(); + VLOG(MKLDNN_FMTS) << inVal_->getFormat() << " >>> " + << outVal_->getFormat(); } } @@ -143,36 +256,12 @@ public: */ virtual void printGradFormatFlow() { if (inGrad_ && outGrad_) { - VLOG(MKLDNN_FMTS) << "grad format flow --- " << inGrad_->getFormat() - << " <<< " << outGrad_->getFormat(); + VLOG(MKLDNN_FMTS) << inGrad_->getFormat() << " <<< " + << outGrad_->getFormat(); } } protected: - /** - * copy image size and sequence info to other device - * @note: can not directly use Layer::copyOutputToOtherDevice since here only - * copy base info and do not copy data value - */ - void copyOutputInfoToOtherDevice() { - int cnt = 0; - for (size_t i = 0; i < outputOtherDevice_.size(); i++) { - outputOtherDevice_[i].setFrameHeight(output_.getFrameHeight()); - outputOtherDevice_[i].setFrameWidth(output_.getFrameWidth()); - outputOtherDevice_[i].sequenceStartPositions = - output_.sequenceStartPositions; - outputOtherDevice_[i].subSequenceStartPositions = - output_.subSequenceStartPositions; - outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims; - if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { - ++cnt; - } - } - if (cnt > 1) { - LOG(WARNING) << "should not have more than one CPU devie"; - } - } - /** * If input only has MKLDNN device. * Otherwise, only support the previous layer using CPU device. @@ -205,6 +294,7 @@ protected: */ void setDevice(int id) { deviceId_ = id; } +private: /** * Set deviceId of the params used in this layer. */ @@ -228,6 +318,42 @@ protected: parameter->setDevice(id); } } + + /** + * Check the cpu device number of outputOtherDevice_. + * should have only one at most. + */ + void checkCPUOutputsNumber(int max = 1) { + int cnt = 0; + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + if (outputOtherDevice_[i].deviceId == CPU_DEVICE) { + ++cnt; + } + } + CHECK_LE(cnt, max) << "too much CPU devies"; + } + + /** + * copy SeqInfo from input layer to this output and other output devices. 
+ * @note: do not use getInput(0) since it used this deviceId_, + * use "inputLayers_[0]->getOutput()" instead. + */ + void copySeqInfoToOutputs() { + if (inputLayers_.empty() || !needSequenceInfo_) { + return; + } + const Argument& input = inputLayers_[0]->getOutput(); + output_.sequenceStartPositions = input.sequenceStartPositions; + output_.subSequenceStartPositions = input.subSequenceStartPositions; + output_.cpuSequenceDims = input.cpuSequenceDims; + for (size_t i = 0; i < outputOtherDevice_.size(); i++) { + outputOtherDevice_[i].sequenceStartPositions = + output_.sequenceStartPositions; + outputOtherDevice_[i].subSequenceStartPositions = + output_.subSequenceStartPositions; + outputOtherDevice_[i].cpuSequenceDims = output_.cpuSequenceDims; + } + } }; } // namespace paddle From f3bb7b99dddf98b1217e6d906ccbe069e2e1e309 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 16:24:58 +0800 Subject: [PATCH 27/69] refine MKLDNNTester add UpdateCallback for test --- paddle/gserver/tests/MKLDNNTester.cpp | 77 +++++++++++++++------------ paddle/gserver/tests/MKLDNNTester.h | 12 +++-- 2 files changed, 51 insertions(+), 38 deletions(-) diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index de1635be2a..11e8527910 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -63,8 +63,12 @@ void MKLDNNTester::reset(const TestConfig& dnn, initTestLayer( configs_[i], &(layerMaps_[i]), &(parameters_[i]), &(testLayers_[i])); } - dnnLayer_ = testLayers_[DNN]; refLayer_ = testLayers_[REF]; + dnnLayer_ = std::dynamic_pointer_cast(testLayers_[DNN]); + CHECK(dnnLayer_); + // for comparison with Paddle reference results, + // need manually add cpu device output for test + dnnLayer_->addOutputArgument(-1); EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size()); EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size()); @@ -109,20 +113,21 @@ void MKLDNNTester::randomBotDatas() { void MKLDNNTester::randomTopDiffs() { refLayer_->getOutputGrad()->randomizeUniform(); - dnnLayer_->getOutputGrad()->copyFrom(*(refLayer_->getOutputGrad())); - VLOG(lvl_) << "Random dom Backward Input, TopDiff: "; + dnnLayer_->getOutput(-1).grad->copyFrom(*(refLayer_->getOutputGrad())); + VLOG(lvl_) << "Random Backward Input, TopDiff: "; printMatrix(refLayer_->getOutputGrad()); } void MKLDNNTester::checkForward() { - printTopDatas(); - double delta = compareMatrix(testLayers_[DNN]->getOutputValue(), - testLayers_[REF]->getOutputValue()); VLOG(MKLDNN_ALL) << "Check Forward"; + printTopDatas(); + double delta = compareMatrix(dnnLayer_->getOutput(-1).value, + refLayer_->getOutputValue()); EXPECT_LE(fabs(delta), eps_); } void MKLDNNTester::checkBackwardData() { + VLOG(MKLDNN_ALL) << "Check Backward Data"; // TODO(TJ): uncomment me when batch norm ready // const bool isBN = dnnLayer_->getType() == "mkldnn_batch_norm"; for (size_t i = 0; i < dataLayers_[DNN].size(); ++i) { @@ -144,14 +149,12 @@ void MKLDNNTester::checkBackwardData() { } void MKLDNNTester::checkBackwardWgts() { + VLOG(MKLDNN_ALL) << "Check Backward Weight"; CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size()); vector dnnWgts; // used to temply save mkldnn weights saveWgt(parameters_[DNN], dnnWgts); - const MKLDNNLayerPtr dnnlayer = - std::dynamic_pointer_cast(dnnLayer_); - CHECK(dnnlayer); - dnnlayer->convertWeightsToPaddle(); + dnnLayer_->convertWeightsToPaddle(); for (size_t i = 0; i < parameters_[DNN].size(); ++i) { const VectorPtr& dnn = 
parameters_[DNN][i]->getBuf(PARAMETER_VALUE); const VectorPtr& ref = parameters_[REF][i]->getBuf(PARAMETER_VALUE); @@ -189,38 +192,38 @@ void MKLDNNTester::restoreWgt(const vector& from, } // clear parameters grad -void MKLDNNTester::clearWgtDiffs() { +void MKLDNNTester::clearWgtDiffs(size_t id) { + CHECK_LE(id, parameters_.size()); for (size_t n = 0; n < parameters_.size(); ++n) { - for (size_t i = 0; i < parameters_[n].size(); ++i) { - const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT); - if (grad) { - grad->zeroMem(); + if (id == n || id == parameters_.size()) { + for (size_t i = 0; i < parameters_[n].size(); ++i) { + const VectorPtr& grad = parameters_[n][i]->getBuf(PARAMETER_GRADIENT); + if (grad) { + grad->zeroMem(); + } } } } } -void MKLDNNTester::clearBotDiffs() { - // dnn and ref +void MKLDNNTester::clearBotDiffs(size_t id) { + CHECK_LE(id, dataLayers_.size()); for (size_t n = 0; n < dataLayers_.size(); ++n) { - // all inputs layers - for (size_t i = 0; i < dataLayers_[n].size(); ++i) { - dataLayers_[n][i]->getOutputGrad()->zeroMem(); + if (id == n || id == dataLayers_.size()) { + // clear inputs layers of this specific layer + for (size_t i = 0; i < dataLayers_[n].size(); ++i) { + dataLayers_[n][i]->getOutputGrad()->zeroMem(); + } } } } -void MKLDNNTester::clearBotDiffs(int n) { - CHECK_LT(n, NUM); - // all inputs layers - for (size_t i = 0; i < dataLayers_[n].size(); ++i) { - dataLayers_[n][i]->getOutputGrad()->zeroMem(); - } -} - -void MKLDNNTester::clearTopDatas() { +void MKLDNNTester::clearTopDatas(size_t id) { + CHECK_LE(id, testLayers_.size()); for (size_t i = 0; i < testLayers_.size(); ++i) { - testLayers_[i]->getOutputValue()->zeroMem(); + if (id == i || id == testLayers_.size()) { + testLayers_[i]->getOutputValue()->zeroMem(); + } } } @@ -300,16 +303,24 @@ void MKLDNNTester::runOnce() { checkForward(); // test backward + // simple updater + UpdateCallback updateCallback = [](Parameter* para) { + auto& grad = para->getBuf(PARAMETER_GRADIENT); + auto& value = para->getBuf(PARAMETER_VALUE); + real lr = 1e-3; + value->add(*grad, lr); + }; randomTopDiffs(); - dnnLayer_->backward(nullptr); - refLayer_->backward(nullptr); + dnnLayer_->backward(updateCallback); + refLayer_->backward(updateCallback); checkBackwardData(); checkBackwardWgts(); // clear buffers // ref code will addto the diff, dnn code will writeto it - // and clearTopDatas() and clearWgtDiffs() should be coverd by test layers + // and clearTopDatas(REF) should be coverd by ref layers clearBotDiffs(REF); + clearWgtDiffs(REF); } void MKLDNNTester::run(const TestConfig& dnn, diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h index e55e4493ff..5ac885638c 100644 --- a/paddle/gserver/tests/MKLDNNTester.h +++ b/paddle/gserver/tests/MKLDNNTester.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include "LayerGradUtil.h" #include "paddle/gserver/layers/MKLDNNBase.h" +#include "paddle/gserver/layers/MKLDNNLayer.h" namespace paddle { @@ -40,7 +41,8 @@ protected: vector layerMaps_; vector> parameters_; vector testLayers_; - LayerPtr dnnLayer_, refLayer_; + LayerPtr refLayer_; + MKLDNNLayerPtr dnnLayer_; /// run some iterations, all the result should pass size_t iter_; @@ -88,10 +90,10 @@ private: void checkBackwardData(); void checkBackwardWgts(); - void clearWgtDiffs(); - void clearBotDiffs(); - void clearBotDiffs(int n); // clear specific layer - void clearTopDatas(); + // clear specific layer, clear all when id equals NUM + void clearWgtDiffs(size_t id = NUM); + void clearBotDiffs(size_t id = NUM); + void clearTopDatas(size_t id = NUM); void printTopDatas(); void printMatrix(const MatrixPtr& m); From 9d46f443fede2edba2e8041b2b30d9513852820b Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 11 Sep 2017 16:43:17 +0800 Subject: [PATCH 28/69] fix attr bug in op_test and ensure order in duplicate inputs/outputs --- python/paddle/v2/framework/tests/op_test.py | 34 ++++++++++++------- .../paddle/v2/framework/tests/test_sum_op.py | 2 +- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 3a6a5dca4c..1daa6fa277 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -9,7 +9,7 @@ def grad_var_name(var_name): return var_name + "@GRAD" -def create_op(scope, op_type, inputs, outputs, attrs=None): +def create_op(scope, op_type, inputs, outputs, attrs): kwargs = dict() for in_name, in_dup in Operator.get_op_inputs(op_type): @@ -17,7 +17,7 @@ def create_op(scope, op_type, inputs, outputs, attrs=None): kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] - for sub_in_name in sub_in: + for sub_in_name, arr in sub_in: var = scope.new_var(sub_in_name) kwargs[in_name].append(sub_in_name) else: @@ -29,15 +29,16 @@ def create_op(scope, op_type, inputs, outputs, attrs=None): kwargs[out_name] = [] if out_dup: sub_in = outputs[out_name] - for sun_in_name in sub_in: - var = scope.new_var(sun_in_name) - kwargs[out_name].append(sun_in_name) + for sub_in_name, arr in sub_in: + var = scope.new_var(sub_in_name) + kwargs[out_name].append(sub_in_name) else: var = scope.new_var(out_name) kwargs[out_name].append(out_name) for attr_name in Operator.get_op_attr_names(op_type): - kwargs[attr_name] = attrs[attr_name] + if attr_name in attrs: + kwargs[attr_name] = attrs[attr_name] return Operator(op_type, **kwargs) @@ -46,10 +47,9 @@ def set_input(scope, op, inputs, place): if in_name in inputs: if in_dup: sub_in = inputs[in_name] - for sub_in_name in sub_in: + for sub_in_name, arr in sub_in: var = scope.find_var(sub_in_name) tensor = var.get_tensor() - arr = sub_in[sub_in_name] tensor.set_dims(arr.shape) tensor.set(arr, place) else: @@ -65,7 +65,7 @@ def set_output_grad(scope, op, outputs, place): if out_name in outputs: if out_dup: sub_out = outputs[out_name] - for sub_out_name in sub_out: + for sub_out_name, arr in sub_out: out_tensor = scope.find_var(sub_out_name).get_tensor() grad_tensor = scope.new_var(grad_var_name( sub_out_name)).get_tensor() @@ -110,7 +110,7 @@ def get_numeric_gradient(scope, # we use a for loop to compute the gradient of every element. for i in xrange(tensor_size): if in_place: - set_input(op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, core.CPUPlace()) # get one input element throw it's index i. 
origin = tensor_to_check.get_float_element(i) @@ -120,7 +120,7 @@ def get_numeric_gradient(scope, y_pos = get_output() if in_place: - set_input(op, inputs, core.CPUPlace()) + set_input(scope, op, inputs, core.CPUPlace()) x_neg = origin - delta tensor_to_check.set_float_element(i, x_neg) @@ -168,7 +168,11 @@ def get_gradient(scope, op, inputs, outputs, grad_name, place, class OpTest(unittest.TestCase): def check_output_with_place(self, place): self.scope = core.Scope() - self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs) + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_outputs = self.outputs if hasattr(self, "outputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + op_attrs) if isinstance(place, core.GPUPlace) and not self.op.support_gpu(): return set_input(self.scope, self.op, self.inputs, place) @@ -227,7 +231,11 @@ class OpTest(unittest.TestCase): in_place=False, max_relative_error=0.005): self.scope = core.Scope() - self.op = create_op(self.scope, self.op_type, self.inputs, self.outputs) + op_inputs = self.inputs if hasattr(self, "inputs") else dict() + op_outputs = self.outputs if hasattr(self, "outputs") else dict() + op_attrs = self.attrs if hasattr(self, "attrs") else dict() + self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + op_attrs) if no_grad_set is None: no_grad_set = set() diff --git a/python/paddle/v2/framework/tests/test_sum_op.py b/python/paddle/v2/framework/tests/test_sum_op.py index 66417d70e8..2ad1cc2610 100644 --- a/python/paddle/v2/framework/tests/test_sum_op.py +++ b/python/paddle/v2/framework/tests/test_sum_op.py @@ -9,7 +9,7 @@ class TestSumOp(OpTest): x0 = np.random.random((3, 4)).astype('float32') x1 = np.random.random((3, 4)).astype('float32') x2 = np.random.random((3, 4)).astype('float32') - self.inputs = {"X": {"x0": x0, "x1": x1, "x2": x2}} + self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} From 1eb18c5715c845f64ac70b770f053c90bcbb272c Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 11 Sep 2017 16:48:31 +0800 Subject: [PATCH 29/69] update write_docs_cn.rst --- doc/howto/dev/write_docs_cn.rst | 64 +++++++++++++-------------------- 1 file changed, 25 insertions(+), 39 deletions(-) diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/howto/dev/write_docs_cn.rst index 36e5d420c9..731a63f945 100644 --- a/doc/howto/dev/write_docs_cn.rst +++ b/doc/howto/dev/write_docs_cn.rst @@ -5,15 +5,13 @@ PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 -如何构建PaddlePaddle的文档 -========================== +如何构建文档 +============ -PaddlePaddle的文档构建有直接构建和基于Docker构建两种方式,我们提供了一个构建脚本build_docs.sh来进行构建。 -PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使用基于Docker来构建PaddlePaddle的文档。 +PaddlePaddle的文档构建有两种方式。 - -使用Docker构建PaddlePaddle的文档 --------------------------------- +使用Docker构建 +-------------- 使用Docker构建PaddlePaddle的文档,需要在系统里先安装好Docker工具包。Docker安装请参考 `Docker的官网 `_ 。安装好Docker之后可以使用源码目录下的脚本构建文档,即 @@ -21,58 +19,46 @@ PaddlePaddle文档需要准备的环境相对较复杂,所以我们推荐使 cd TO_YOUR_PADDLE_CLONE_PATH cd paddle/scripts/tools/build_docs - bash build_docs.sh with_docker - -编译完成后,会在当前目录生成两个子目录\: - -* doc 英文文档目录 -* doc_cn 中文文档目录 + sh build_docs.sh +编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 打开浏览器访问对应目录下的index.html即可访问本地文档。 - - -直接构建PaddlePaddle的文档 --------------------------- - -因为PaddlePaddle的v2 api文档生成过程依赖于py_paddle 
Python包,用户需要首先确认py_paddle包已经安装。 - -.. code-block:: bash - - python -c "import py_paddle" - -如果提示错误,那么用户需要在本地编译安装PaddlePaddle,请参考 `源码编译文档 `_ 。 -注意,用户在首次编译安装PaddlePaddle时,请将WITH_DOC选项关闭。在编译安装正确之后,请再次确认py_paddle包已经安装,即可进行下一步操作。 +直接构建 +-------- 如果提示正确,可以执行以下命令编译生成文档,即 .. code-block:: bash cd TO_YOUR_PADDLE_CLONE_PATH - cd paddle/scripts/tools/build_docs - bash build_docs.sh local - -编译完成之后,会在当前目录生成两个子目录\: - -* doc 英文文档目录 -* doc_cn 中文文档目录 + mkdir -p build + cd build + cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKLDNN=OFF -DWITH_MKLML=OFF -DWITH_DOC=ON + make gen_proto_py + make paddle_docs paddle_docs_cn +编译完成之后,会在当前目录生成两个子目录\: doc(英文文档目录)和 doc_cn(中文文档目录)。 打开浏览器访问对应目录下的index.html即可访问本地文档。 -如何书写PaddlePaddle的文档 -========================== +如何书写文档 +============ PaddlePaddle文档使用 `sphinx`_ 自动生成,用户可以参考sphinx教程进行书写。 -如何更新www.paddlepaddle.org文档 -================================ +如何更新文档主题 +================ + +PaddlePaddle文档主题在 `TO_YOUR_PADDLE_CLONE_PATH/doc_theme` 文件夹下,包含所有和前端网页设计相关的文件。 -开发者给PaddlePaddle代码增加的注释以PR的形式提交到github中,提交方式可参见 `贡献文档 `_ 。 +如何更新doc.paddlepaddle.org +============================ + +更新的文档以PR的形式提交到github中,提交方式参见 `贡献文档 `_ 。 目前PaddlePaddle的develop分支的文档是自动触发更新的,用户可以分别查看最新的 `中文文档 `_ 和 `英文文档 `_ 。 - .. _cmake: https://cmake.org/ .. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ From 76a70d10db35a812654d3d2b2351b1ce867d511d Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 11 Sep 2017 17:27:49 +0800 Subject: [PATCH 30/69] fix unit test error --- python/paddle/v2/framework/tests/gradient_checker.py | 4 ++-- python/paddle/v2/framework/tests/op_test_util.py | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 51a98284bd..ed838b5979 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -11,9 +11,9 @@ __all__ = ['get_numeric_gradient'] def create_op(op_type): # TODO need to set attrs kwargs = dict() - for in_name, _ in Operator.get_op_input_names(op_type): + for in_name in Operator.get_op_input_names(op_type): kwargs[in_name] = in_name - for out_name, _ in Operator.get_op_output_names(op_type): + for out_name in Operator.get_op_output_names(op_type): kwargs[out_name] = out_name return Operator(op_type, **kwargs) diff --git a/python/paddle/v2/framework/tests/op_test_util.py b/python/paddle/v2/framework/tests/op_test_util.py index 54fe5da440..99a114e45f 100644 --- a/python/paddle/v2/framework/tests/op_test_util.py +++ b/python/paddle/v2/framework/tests/op_test_util.py @@ -27,9 +27,7 @@ class OpTestMeta(type): places.append(core.GPUPlace(0)) for place in places: - for ins in Operator.get_op_input_names(self.type): - in_name = ins[0] - in_dup = ins[1] + for in_name, in_dup in Operator.get_op_inputs(self.type): if hasattr(self, 'inputs') and in_name in self.inputs: kwargs[in_name] = [] if in_dup: @@ -49,8 +47,7 @@ class OpTestMeta(type): else: kwargs[in_name] = "@EMPTY@" - for out_name, out_dup in Operator.get_op_output_names( - self.type): + for out_name, out_dup in Operator.get_op_outputs(self.type): if not hasattr(self, "outputs"): raise ValueError( "The test op must set self.outputs dict.") @@ -73,8 +70,7 @@ class OpTestMeta(type): ctx = core.DeviceContext.create(place) op.run(scope, ctx) - for out_name, out_dup in Operator.get_op_output_names( - self.type): + for out_name, out_dup in Operator.get_op_outputs(self.type): actual = 
numpy.array(scope.find_var(out_name).get_tensor()) expect = self.outputs[out_name] self.assertTrue( From 477b23c3f5c123b446cec48321105e1a471c1212 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 11 Sep 2017 18:37:19 +0800 Subject: [PATCH 31/69] follow comments --- python/paddle/v2/framework/tests/op_test.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 1daa6fa277..489358ba85 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -17,7 +17,7 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs[in_name] = [] if in_dup: sub_in = inputs[in_name] - for sub_in_name, arr in sub_in: + for sub_in_name, _ in sub_in: var = scope.new_var(sub_in_name) kwargs[in_name].append(sub_in_name) else: @@ -29,7 +29,7 @@ def create_op(scope, op_type, inputs, outputs, attrs): kwargs[out_name] = [] if out_dup: sub_in = outputs[out_name] - for sub_in_name, arr in sub_in: + for sub_in_name, _ in sub_in: var = scope.new_var(sub_in_name) kwargs[out_name].append(sub_in_name) else: @@ -47,11 +47,11 @@ def set_input(scope, op, inputs, place): if in_name in inputs: if in_dup: sub_in = inputs[in_name] - for sub_in_name, arr in sub_in: + for sub_in_name, sub_in_array in sub_in: var = scope.find_var(sub_in_name) tensor = var.get_tensor() - tensor.set_dims(arr.shape) - tensor.set(arr, place) + tensor.set_dims(sub_in_array.shape) + tensor.set(sub_in_array, place) else: var = scope.find_var(in_name) tensor = var.get_tensor() @@ -65,7 +65,7 @@ def set_output_grad(scope, op, outputs, place): if out_name in outputs: if out_dup: sub_out = outputs[out_name] - for sub_out_name, arr in sub_out: + for sub_out_name, sub_out_grad in sub_out: out_tensor = scope.find_var(sub_out_name).get_tensor() grad_tensor = scope.new_var(grad_var_name( sub_out_name)).get_tensor() @@ -169,9 +169,8 @@ class OpTest(unittest.TestCase): def check_output_with_place(self, place): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() - op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else dict() - self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs, op_attrs) if isinstance(place, core.GPUPlace) and not self.op.support_gpu(): return @@ -232,9 +231,8 @@ class OpTest(unittest.TestCase): max_relative_error=0.005): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() - op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else dict() - self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs, + self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs, op_attrs) if no_grad_set is None: no_grad_set = set() From 68943f59aa58b3f4f18c7647305dcc3683c17b7a Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 11 Sep 2017 18:38:07 +0800 Subject: [PATCH 32/69] Add construction function for LoDTensor in pybind. 
--- paddle/pybind/pybind.cc | 15 +++- .../paddle/v2/framework/tests/test_tensor.py | 89 ++++++++++++------- 2 files changed, 69 insertions(+), 35 deletions(-) diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 12eb14cf20..fe4af2c99e 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -125,10 +125,23 @@ The tensor and LoD info should be created before creating the LoDTensor, then call the set_tensor and set_lod functions to set them. )DOC") + .def("__init__", + [](LoDTensor &instance, + const std::vector> &lod, + Tensor *t) { +#ifdef PADDLE_ONLY_CPU + new (&instance) LoDTensor(lod, t); +#else + paddle::framework::LoD new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + new (&instance) LoDTensor(new_lod, t); +#endif + }) .def("set_tensor", [](LoDTensor &self, Tensor *tensor) { self.set_tensor(tensor); }) .def("set_lod", - [](LoDTensor &self, std::vector> &lod) { + [](LoDTensor &self, const std::vector> &lod) { #ifdef PADDLE_ONLY_CPU self.set_lod(lod); #else diff --git a/python/paddle/v2/framework/tests/test_tensor.py b/python/paddle/v2/framework/tests/test_tensor.py index 1bfe1370e2..f26ed4964c 100644 --- a/python/paddle/v2/framework/tests/test_tensor.py +++ b/python/paddle/v2/framework/tests/test_tensor.py @@ -44,61 +44,82 @@ class TestTensor(unittest.TestCase): self.assertAlmostEqual(2.0, tensor_array_2[19, 11]) def test_int_lod_tensor(self): - scope = core.Scope() - var = scope.new_var("test_tensor") - var_lod = scope.new_var("test_lod_tensor") - place = core.CPUPlace() + places = [core.CPUPlace(), core.GPUPlace(0)] + for place in places: + scope = core.Scope() + var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") - tensor = var.get_tensor() - lod_tensor = var_lod.get_lod_tensor() + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() - tensor.set_dims([4, 4, 6]) - tensor.alloc_int(place) - array = numpy.array(tensor) - array[0, 0, 0] = 3 - array[3, 3, 5] = 10 - tensor.set(array, place) + tensor.set_dims([4, 4, 6]) + tensor.alloc_int(place) + array = numpy.array(tensor) + array[0, 0, 0] = 3 + array[3, 3, 5] = 10 + tensor.set(array, place) - lod_tensor.set_tensor(tensor) - lod_tensor.set_lod([[0, 2, 4]]) + lod_tensor.set_tensor(tensor) + lod_tensor.set_lod([[0, 2, 4]]) - lod_v = numpy.array(lod_tensor.tensor()) - self.assertTrue(numpy.alltrue(array == lod_v)) + lod_v = numpy.array(lod_tensor.tensor()) + self.assertTrue(numpy.alltrue(array == lod_v)) - lod = lod_tensor.lod() - self.assertEqual(0, lod[0][0]) - self.assertEqual(2, lod[0][1]) - self.assertEqual(4, lod[0][2]) + lod = lod_tensor.lod() + self.assertEqual(0, lod[0][0]) + self.assertEqual(2, lod[0][1]) + self.assertEqual(4, lod[0][2]) def test_float_lod_tensor(self): + places = [core.CPUPlace(), core.GPUPlace(0)] + for place in places: + scope = core.Scope() + var = scope.new_var("test_tensor") + var_lod = scope.new_var("test_lod_tensor") + + tensor = var.get_tensor() + lod_tensor = var_lod.get_lod_tensor() + + tensor.set_dims([5, 2, 3, 4]) + tensor.alloc_float(place) + + tensor_array = numpy.array(tensor) + self.assertEqual((5, 2, 3, 4), tensor_array.shape) + tensor_array[0, 0, 0, 0] = 1.0 + tensor_array[0, 0, 0, 1] = 2.0 + tensor.set(tensor_array, place) + + lod_tensor.set_tensor(tensor) + + lod_v = numpy.array(lod_tensor.tensor()) + self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) + self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) + self.assertEqual(len(lod_tensor.lod()), 0) + + lod_py = [[0, 2, 5], [0, 
2, 4, 5]] + lod_tensor.set_lod(lod_py) + lod = lod_tensor.lod() + self.assertListEqual(lod_py, lod) + + def test_lod_tensor_init(self): scope = core.Scope() var = scope.new_var("test_tensor") - var_lod = scope.new_var("test_lod_tensor") place = core.CPUPlace() - tensor = var.get_tensor() - lod_tensor = var_lod.get_lod_tensor() - tensor.set_dims([5, 2, 3, 4]) tensor.alloc_float(place) - tensor_array = numpy.array(tensor) - self.assertEqual((5, 2, 3, 4), tensor_array.shape) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 tensor.set(tensor_array, place) + lod_py = [[0, 2, 5], [0, 2, 4, 5]] - lod_tensor.set_tensor(tensor) - + lod_tensor = core.LoDTensor(lod_py, tensor) lod_v = numpy.array(lod_tensor.tensor()) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) - self.assertEqual(len(lod_tensor.lod()), 0) - - lod_py = [[0, 2, 5], [0, 2, 4, 5]] - lod_tensor.set_lod(lod_py) - lod = lod_tensor.lod() - self.assertListEqual(lod_py, lod) + self.assertListEqual(lod_py, lod_tensor.lod()) if __name__ == '__main__': From e9a1f2ad8b2c1e78d0f3fcb50d2e026030bfcb03 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 11 Sep 2017 17:51:09 +0800 Subject: [PATCH 33/69] fix DeConv3D switch(imgSize*_, output*_) --- paddle/gserver/layers/DeConv3DLayer.cpp | 40 ++++++++++++------------- paddle/gserver/tests/test_LayerGrad.cpp | 33 ++++++++++---------- 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/paddle/gserver/layers/DeConv3DLayer.cpp b/paddle/gserver/layers/DeConv3DLayer.cpp index 1b59ed60c5..3eea638649 100644 --- a/paddle/gserver/layers/DeConv3DLayer.cpp +++ b/paddle/gserver/layers/DeConv3DLayer.cpp @@ -53,27 +53,27 @@ bool DeConv3DLayer::init(const LayerMap &layerMap, size_t DeConv3DLayer::getSize() { CHECK_NE(inputLayers_.size(), 0UL); - outputH_.clear(); - outputW_.clear(); - outputD_.clear(); + imgSizeW_.clear(); + imgSizeH_.clear(); + imgSizeD_.clear(); N_.clear(); NOut_.clear(); size_t layerSize = 0; for (size_t i = 0; i < inputLayers_.size(); ++i) { - outputW_.push_back( - imageSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i], true)); - outputH_.push_back(imageSize( - imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true)); - outputD_.push_back(imageSize( - imgSizeD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true)); - NOut_.push_back(outputD_[i] * outputH_[i] * outputW_[i]); - N_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]); + imgSizeW_.push_back( + imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i], true)); + imgSizeH_.push_back(imageSize( + outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i], true)); + imgSizeD_.push_back(imageSize( + outputD_[i], filterSizeZ_[i], paddingZ_[i], strideZ_[i], true)); + NOut_.push_back(imgSizeD_[i] * imgSizeH_[i] * imgSizeW_[i]); + N_.push_back(outputD_[i] * outputH_[i] * outputW_[i]); CHECK(layerSize == 0 || N_[i] * size_t(numFilters_) == layerSize); layerSize += NOut_[i] * numFilters_; } - getOutput().setFrameHeight(outputH_[0]); - getOutput().setFrameWidth(outputW_[0]); - getOutput().setFrameDepth(outputD_[0]); + getOutput().setFrameHeight(imgSizeH_[0]); + getOutput().setFrameWidth(imgSizeW_[0]); + getOutput().setFrameDepth(imgSizeD_[0]); return layerSize; } @@ -103,9 +103,9 @@ void DeConv3DLayer::forward(PassType passType) { } colBuf_->col2Vol(outMat->getData() + n * outMat->getStride(), numFilters_, - outputD_[i], - outputH_[i], - outputW_[i], + imgSizeD_[i], + imgSizeH_[i], + imgSizeW_[i], filterSizeZ_[i], filterSizeY_[i], filterSize_[i], @@ 
-144,9 +144,9 @@ void DeConv3DLayer::backward(const UpdateCallback &callback) { colBuf_->vol2Col( getOutputGrad()->getData() + n * getOutputGrad()->getStride(), numFilters_, - outputD_[i], - outputH_[i], - outputW_[i], + imgSizeD_[i], + imgSizeH_[i], + imgSizeW_[i], filterSizeZ_[i], filterSizeY_[i], filterSize_[i], diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 0e6be2df9e..090bde7b20 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -2302,26 +2302,27 @@ void test3DDeConvLayer(const string& type, bool trans, bool useGpu) { conv->set_stride(2); conv->set_stride_y(2); conv->set_stride_z(2); - conv->set_img_size(IMAGE_SIZE); - conv->set_img_size_y(IMAGE_SIZE_Y); - conv->set_img_size_z(IMAGE_SIZE_Z); - conv->set_output_x(imageSize(conv->img_size(), + conv->set_output_x(IMAGE_SIZE); + conv->set_output_y(IMAGE_SIZE_Y); + conv->set_output_z(IMAGE_SIZE_Z); + + conv->set_img_size(imageSize(conv->output_x(), conv->filter_size(), conv->padding(), conv->stride(), true)); - conv->set_output_y(imageSize(conv->img_size_y(), - conv->filter_size_y(), - conv->padding_y(), - conv->stride_y(), - true)); - conv->set_output_z(imageSize(conv->img_size_z(), - conv->filter_size_z(), - conv->padding_z(), - conv->stride_z(), - true)); - config.layerConfig.set_size(conv->output_x() * conv->output_y() * - conv->output_z() * NUM_FILTERS); + conv->set_img_size_y(imageSize(conv->output_y(), + conv->filter_size_y(), + conv->padding_y(), + conv->stride_y(), + true)); + conv->set_img_size_z(imageSize(conv->output_z(), + conv->filter_size_z(), + conv->padding_z(), + conv->stride_z(), + true)); + config.layerConfig.set_size(conv->img_size() * conv->img_size_y() * + conv->img_size_z() * NUM_FILTERS); conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); config.inputDefs.push_back( From a3ec652110291c539eca9234561f2ca433fbe2a1 Mon Sep 17 00:00:00 2001 From: qijun Date: Mon, 11 Sep 2017 21:39:43 +0800 Subject: [PATCH 34/69] fix bug --- python/paddle/v2/framework/tests/op_test.py | 1 - python/paddle/v2/framework/tests/test_softmax_op.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index ddeb2aabc6..4fec4c9109 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -89,7 +89,6 @@ def get_numeric_gradient(scope, delta=0.005, in_place=False): - print "before set input" set_input(scope, op, inputs, core.CPUPlace()) op.infer_shape(scope) diff --git a/python/paddle/v2/framework/tests/test_softmax_op.py b/python/paddle/v2/framework/tests/test_softmax_op.py index 1c5802dfd5..1b948f252f 100644 --- a/python/paddle/v2/framework/tests/test_softmax_op.py +++ b/python/paddle/v2/framework/tests/test_softmax_op.py @@ -12,7 +12,7 @@ def stable_softmax(x): class TestSoftmaxOp(OpTest): def setUp(self): - self.type = "softmax" + self.op_type = "softmax" self.inputs = { 'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32") } From 4f0869bef36a349506467160b67596ac251bf78b Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 22:28:41 +0800 Subject: [PATCH 35/69] add some necessary params for reset functions --- paddle/gserver/layers/MKLDNNFcLayer.cpp | 135 +++++++++++++----------- paddle/gserver/layers/MKLDNNFcLayer.h | 19 +++- paddle/gserver/layers/MKLDNNLayer.h | 41 ++++--- 3 files changed, 111 insertions(+), 84 deletions(-) diff --git 
a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index a47967b3d3..f70343251a 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -78,46 +78,52 @@ void MKLDNNFcLayer::convertWeightsToPaddle() { wgtVal_->reorderDataTo(wgtVal_, dstFmt, targetDim); } -void MKLDNNFcLayer::reshape() { - reshapeInput(); +void MKLDNNFcLayer::reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) { + reshapeInput(bs, ih, iw); CHECK_EQ(iLayerSize_, inputLayers_[0]->getSize()); - ic_ = iLayerSize_ / (ih_ * iw_); - CHECK_EQ(size_t(ic_ * ih_ * iw_), iLayerSize_) << "not divisible"; - CHECK_EQ(size_t(oc_), getSize()); + ic = iLayerSize_ / (ih * iw); + CHECK_EQ(size_t(ic * ih * iw), iLayerSize_) << "not divisible"; + CHECK_EQ(size_t(oc), getSize()); - reshapeOutput(oh_, ow_); - resizeOutput(bs_, oc_); + reshapeOutput(oh, ow); + resizeOutput(bs, oc); printSizeInfo(); } -void MKLDNNFcLayer::resetFwd() { +void MKLDNNFcLayer::resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); bool hasBias = biases_ && biases_->getW(); - const MatrixPtr& wgt = weight_->getW(); - const MatrixPtr& bias = hasBias ? biases_->getW() : nullptr; - const MatrixPtr& out = output_.value; + const MatrixPtr& wgtVal = weight_->getW(); + const MatrixPtr& biasVal = hasBias ? biases_->getW() : nullptr; + const MatrixPtr& outVal = output_.value; if (inputIsOnlyMKLDNN()) { - const MatrixPtr& in = getInputValue(0); - inVal_ = std::dynamic_pointer_cast(in); - CHECK(inVal_) << "Input should be MKLDNNMatrix"; + const MatrixPtr& inVal = getInputValue(0); + in = std::dynamic_pointer_cast(inVal); + CHECK(in) << "Input should be MKLDNNMatrix"; } else { CHECK_EQ(getPrev(0)->getDeviceId(), CPU_DEVICE) << "Only support CPU yet"; - const MatrixPtr& in = getInputValue(0, CPU_DEVICE); - inVal_ = MKLDNNMatrix::create( - in, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_); + const MatrixPtr& inVal = getInputValue(0, CPU_DEVICE); + in = MKLDNNMatrix::create( + inVal, memory::dims{bs_, ic_, ih_, iw_}, format::nchw, engine_); } - inVal_->downSpatial(); - wgtVal_ = MKLDNNMatrix::create( - wgt, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_); - wgtVal_->downSpatial(); - biasVal_ = - hasBias ? MKLDNNMatrix::create(bias, {oc_}, format::x, engine_) : nullptr; - outVal_ = MKLDNNMatrix::create(out, {bs_, oc_}, format::nc, engine_); + in->downSpatial(); + wgt = MKLDNNMatrix::create( + wgtVal, memory::dims{oc_, ic_, ih_, iw_}, format::oihw, engine_); + wgt->downSpatial(); + bias = hasBias ? MKLDNNMatrix::create(biasVal, {oc_}, format::x, engine_) + : nullptr; + out = MKLDNNMatrix::create(outVal, {bs_, oc_}, format::nc, engine_); // change original output value to mkldnn output value - output_.value = std::dynamic_pointer_cast(outVal_); + output_.value = std::dynamic_pointer_cast(out); if (!outputIsOnlyMKLDNN()) { // fc cpu output value do not need create convert // just share point @@ -127,27 +133,31 @@ void MKLDNNFcLayer::resetFwd() { // create forward handle prop_kind pk = prop_kind::forward; fc_fwd::desc fwdDesc = hasBias ? 
fc_fwd::desc(pk, - inVal_->getMemoryDesc(), - wgtVal_->getMemoryDesc(), - biasVal_->getMemoryDesc(), - outVal_->getMemoryDesc()) + in->getMemoryDesc(), + wgt->getMemoryDesc(), + bias->getMemoryDesc(), + out->getMemoryDesc()) : fc_fwd::desc(pk, - inVal_->getMemoryDesc(), - wgtVal_->getMemoryDesc(), - outVal_->getMemoryDesc()); + in->getMemoryDesc(), + wgt->getMemoryDesc(), + out->getMemoryDesc()); fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); if (hasBias) { - fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *biasVal_, *outVal_)); + fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *bias, *out)); } else { - fwd_.reset(new fc_fwd(fwdPD, *inVal_, *wgtVal_, *outVal_)); + fwd_.reset(new fc_fwd(fwdPD, *in, *wgt, *out)); } printValueFormatFlow(); - pipelineFwd_.clear(); - pipelineFwd_.push_back(*fwd_); + pipeline.push_back(*fwd_); } -void MKLDNNFcLayer::resetBwd() { +void MKLDNNFcLayer::resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) { + pipeline.clear(); if (!needResetBwd_) { return; } @@ -156,8 +166,8 @@ void MKLDNNFcLayer::resetBwd() { /// backward weight CHECK(inVal_) << "Should have input value"; - const MatrixPtr& wgt = weight_->getWGrad(); - const MatrixPtr& bias = hasBias ? biases_->getWGrad() : nullptr; + const MatrixPtr& wgtGrad = weight_->getWGrad(); + const MatrixPtr& biasGrad = hasBias ? biases_->getWGrad() : nullptr; // TODO(TJ): merge outgrad int device = outputIsOnlyMKLDNN() ? MKLDNN_DEVICE : CPU_DEVICE; @@ -168,59 +178,56 @@ void MKLDNNFcLayer::resetBwd() { // for CPU device: // fc do not need to convert from cpu device since output is always nc format // only need create from cpu device - const MatrixPtr& out = getOutput(device).grad; - outGrad_ = MKLDNNMatrix::create(out, outVal_->getPrimitiveDesc()); - wgtGrad_ = MKLDNNMatrix::create(wgt, wgtVal_->getPrimitiveDesc()); - biasGrad_ = hasBias ? MKLDNNMatrix::create(bias, biasVal_->getPrimitiveDesc()) - : nullptr; + const MatrixPtr& outGrad = getOutput(device).grad; + out = MKLDNNMatrix::create(outGrad, outVal_->getPrimitiveDesc()); + wgt = MKLDNNMatrix::create(wgtGrad, wgtVal_->getPrimitiveDesc()); + bias = hasBias ? MKLDNNMatrix::create(biasGrad, biasVal_->getPrimitiveDesc()) + : nullptr; // create memory primitive desc fc_fwd::desc fwdDesc = fc_fwd::desc(prop_kind::forward, inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + wgt->getMemoryDesc(), + out->getMemoryDesc()); fc_fwd::primitive_desc fwdPD = fc_fwd::primitive_desc(fwdDesc, engine_); fc_bwdWgt::desc bwdWgtDesc = hasBias ? 
fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - biasGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()) + wgt->getMemoryDesc(), + bias->getMemoryDesc(), + out->getMemoryDesc()) : fc_bwdWgt::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + wgt->getMemoryDesc(), + out->getMemoryDesc()); fc_bwdWgt::primitive_desc bwdWgtPD = fc_bwdWgt::primitive_desc(bwdWgtDesc, engine_, fwdPD); if (hasBias) { - bwdWgt_.reset( - new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_, *biasGrad_)); + bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt, *bias)); } else { - bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *outGrad_, *wgtGrad_)); + bwdWgt_.reset(new fc_bwdWgt(bwdWgtPD, *inVal_, *out, *wgt)); } - pipelineBwd_.clear(); - pipelineBwd_.push_back(*bwdWgt_); + pipeline.push_back(*bwdWgt_); /// backward data - const MatrixPtr& in = inputLayers_[0]->getOutput().grad; - if (in == nullptr) { + const MatrixPtr& inGrad = inputLayers_[0]->getOutput().grad; + if (inGrad == nullptr) { return; } if (getInput(0, MKLDNN_DEVICE).getAllCount() > 1) { // TODO(TJ): use outputMaps_ ways to get the inGrad_ when merge outgrad done } else { - inGrad_ = MKLDNNMatrix::create(in, inVal_->getPrimitiveDesc()); + in = MKLDNNMatrix::create(inGrad, inVal_->getPrimitiveDesc()); } - fc_bwdData::desc bwdDataDesc = fc_bwdData::desc(inVal_->getMemoryDesc(), - wgtGrad_->getMemoryDesc(), - outGrad_->getMemoryDesc()); + fc_bwdData::desc bwdDataDesc = fc_bwdData::desc( + inVal_->getMemoryDesc(), wgt->getMemoryDesc(), out->getMemoryDesc()); fc_bwdData::primitive_desc bwdDataPD = fc_bwdData::primitive_desc(bwdDataDesc, engine_, fwdPD); CHECK(wgtVal_) << "Should have weight memory"; - bwdData_.reset(new fc_bwdData(bwdDataPD, *outGrad_, *wgtVal_, *inGrad_)); + bwdData_.reset(new fc_bwdData(bwdDataPD, *out, *wgtVal_, *in)); printGradFormatFlow(); - pipelineBwd_.push_back(*bwdData_); + pipeline.push_back(*bwdData_); } void MKLDNNFcLayer::updateInputData() { diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/gserver/layers/MKLDNNFcLayer.h index add8ac9991..3119f86349 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/gserver/layers/MKLDNNFcLayer.h @@ -45,11 +45,20 @@ public: bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; - void reshape() override; - - void resetFwd() override; - - void resetBwd() override; + void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) override; + + void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; + + void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) override; void updateInputData() override; diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/gserver/layers/MKLDNNLayer.h index c10f2fec2f..169679c829 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/gserver/layers/MKLDNNLayer.h @@ -111,13 +111,14 @@ public: { REGISTER_TIMER_INFO("mkldnn_FwdTimer", getName().c_str()); - copySeqInfoToOutputs(); CHECK(!inputLayers_.empty()); + copySeqInfoToOutputs(); size_t elemenCnt = inputLayers_[0]->getOutput().value->getElementCnt(); if (inputElemenCnt_ != elemenCnt) { + // reset when input total sizes changed, not only the batchsize inputElemenCnt_ = elemenCnt; - reshape(); - resetFwd(); + reshape(bs_, ic_, ih_, iw_, oc_, oh_, ow_); + resetFwd(pipelineFwd_, inVal_, wgtVal_, biasVal_, outVal_); 
convertWeightsFromPaddle(); needResetBwd_ = true; } @@ -144,7 +145,7 @@ public: { REGISTER_TIMER_INFO("mkldnn_bwdTimer", getName().c_str()); if (needResetBwd_) { - resetBwd(); + resetBwd(pipelineBwd_, inGrad_, wgtGrad_, biasGrad_, outGrad_); needResetBwd_ = false; } @@ -160,20 +161,30 @@ public: /** * reshape the input image sizes * and reset output image and buffer size + * output channel can not be changed */ - virtual void reshape() = 0; + virtual void reshape( + int& bs, int& ic, int& ih, int& iw, int oc, int& oh, int& ow) = 0; /** * reset the mkldnn forward primitve and memory * only would be called when input size changes */ - virtual void resetFwd() = 0; + virtual void resetFwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) = 0; /** * reset the mkldnn backward primitve and memory for mkldnn fc * only would be called when needed */ - virtual void resetBwd() = 0; + virtual void resetBwd(std::vector& pipeline, + MKLDNNMatrixPtr& in, + MKLDNNMatrixPtr& wgt, + MKLDNNMatrixPtr& bias, + MKLDNNMatrixPtr& out) = 0; /** * Update input value data when input layer is "data" type. @@ -207,16 +218,16 @@ protected: /** * reshape the input image sizes and input batchsize */ - virtual void reshapeInput() { + virtual void reshapeInput(int& batchsize, int& height, int& width) { const Argument& input = inputLayers_[0]->getOutput(); - bs_ = input.getBatchSize(); - int height = input.getFrameHeight(); - int width = input.getFrameWidth(); - if (height != 0) { - ih_ = height; + batchsize = input.getBatchSize(); + int h = input.getFrameHeight(); + int w = input.getFrameWidth(); + if (h != 0) { + height = h; } - if (width != 0) { - iw_ = width; + if (w != 0) { + width = w; } } From 21d49744051a6ba0d2f6901cd8db8a242cfcc05a Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 11 Sep 2017 11:01:29 -0700 Subject: [PATCH 36/69] "fix name" --- paddle/operators/name_convention.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 2260bf5660..280ab8d317 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -4,7 +4,7 @@ To make the operator document itself more clear. we recommend operator names obs ### Input/Output names -* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with otherone. +* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with other one. * element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. 
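
With the naming convention above and the `OpTest` helper introduced earlier in this series, a test for a conventionally named operator reduces to declaring `X`/`Y` inputs and a single `Out` output. The snippet below is an editorial sketch rather than part of any patch here: it assumes an operator of type `rowwise_add` whose inputs and output follow the convention is registered in the build under test (the real kernel's names may differ), and that it is run from the tests directory so `op_test` is importable.

```python
# Editorial sketch: an operator test written against the conventional
# names `X`, `Y`, `Out`.  The `rowwise_add` type and its input names are
# assumptions for illustration only.
import unittest
import numpy as np
import paddle.v2.framework.core as core
from op_test import OpTest  # helper added earlier in this series


class TestRowwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "rowwise_add"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [4, 8]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [8]).astype("float32"),
        }
        # a single output keeps the conventional name `Out`
        self.outputs = {'Out': self.inputs['X'] + self.inputs['Y']}

    def test_check_output(self):
        # check_output_with_place is defined in op_test.py above
        self.check_output_with_place(core.CPUPlace())


if __name__ == '__main__':
    unittest.main()
```
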
From fb32106e246695fc91a63186fb22a68c66f98a33 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 11 Sep 2017 17:14:01 -0700 Subject: [PATCH 37/69] Make paddle.v2.inference can direct load protobuf --- python/paddle/v2/inference.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 8acea6155c..19624a704f 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -25,11 +25,18 @@ class Inference(object): :type parameters: paddle.v2.parameters.Parameters """ - def __init__(self, output_layer, parameters): + def __init__(self, output_layer, parameters, data_types=None): import py_paddle.swig_paddle as api - topo = topology.Topology(output_layer) - gm = api.GradientMachine.createFromConfigProto( - topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE]) + if isinstance(output_layer, str): + gm = api.GradientMachine.createByConfigProtoStr(output_layer) + if data_types is None: + raise ValueError("data_types != None when using protobuf bin") + self.__data_types__ = data_types + else: + topo = topology.Topology(output_layer) + gm = api.GradientMachine.createFromConfigProto( + topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE]) + self.__data_types__ = topo.data_type() for param in gm.getParameters(): val = param.getBuf(api.PARAMETER_VALUE) name = param.getName() @@ -43,7 +50,6 @@ class Inference(object): # called here, but it's better to call this function in one place. param.setValueUpdated() self.__gradient_machine__ = gm - self.__data_types__ = topo.data_type() def iter_infer(self, input, feeding=None): from data_feeder import DataFeeder From 2b352212c27ccdccb94a2878d823b2150d74bf00 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 11 Sep 2017 17:42:20 -0700 Subject: [PATCH 38/69] Add serialize to file for topology and read file obj for inference --- python/paddle/v2/inference.py | 20 +++++++++++++------- python/paddle/v2/topology.py | 9 +++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 19624a704f..e80456d9bb 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -2,6 +2,7 @@ import numpy import collections import topology import minibatch +import cPickle __all__ = ['infer', 'Inference'] @@ -25,18 +26,23 @@ class Inference(object): :type parameters: paddle.v2.parameters.Parameters """ - def __init__(self, output_layer, parameters, data_types=None): + def __init__(self, parameters, output_layer=None, fileobj=None): import py_paddle.swig_paddle as api - if isinstance(output_layer, str): - gm = api.GradientMachine.createByConfigProtoStr(output_layer) - if data_types is None: - raise ValueError("data_types != None when using protobuf bin") - self.__data_types__ = data_types - else: + + if output_layer is not None: topo = topology.Topology(output_layer) gm = api.GradientMachine.createFromConfigProto( topo.proto(), api.CREATE_MODE_TESTING, [api.PARAMETER_VALUE]) self.__data_types__ = topo.data_type() + elif fileobj is not None: + tmp = cPickle.load(fileobj) + gm = api.GradientMachine.createByConfigProtoStr( + tmp['protobin'], api.CREATE_MODE_TESTING, + [api.PARAMETER_VALUE]) + self.__data_types__ = tmp['data_type'] + else: + raise ValueError("Either output_layer or fileobj must be set") + for param in gm.getParameters(): val = param.getBuf(api.PARAMETER_VALUE) name = param.getName() diff --git a/python/paddle/v2/topology.py 
b/python/paddle/v2/topology.py index a20e878d08..2db66be250 100644 --- a/python/paddle/v2/topology.py +++ b/python/paddle/v2/topology.py @@ -18,6 +18,7 @@ from paddle.proto.ModelConfig_pb2 import ModelConfig import paddle.trainer_config_helpers as conf_helps import layer as v2_layer import config_base +import cPickle __all__ = ['Topology'] @@ -100,6 +101,14 @@ class Topology(object): return layer return None + def serialize_for_inference(self, stream): + protobin = self.proto().SerializeToString() + data_type = self.data_type() + cPickle.dump({ + 'protobin': protobin, + 'data_type': data_type + }, stream, cPickle.HIGHEST_PROTOCOL) + def __check_layer_type__(layer): if not isinstance(layer, config_base.Layer): From 7f7fa325287a2ef434aca8a38bed4af2496429a2 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 11 Sep 2017 22:49:11 +0800 Subject: [PATCH 39/69] use CPU_DEVICE instead of magic number --- paddle/gserver/layers/Layer.h | 11 ++++++----- paddle/gserver/tests/MKLDNNTester.cpp | 5 +++-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/paddle/gserver/layers/Layer.h b/paddle/gserver/layers/Layer.h index edef36194a..4002a3d074 100644 --- a/paddle/gserver/layers/Layer.h +++ b/paddle/gserver/layers/Layer.h @@ -49,6 +49,12 @@ struct LayerState { }; typedef std::shared_ptr LayerStatePtr; +/// Paddle device ID, MKLDNN is -2, CPU is -1 +enum PADDLE_DEVICE_ID { + MKLDNN_DEVICE = -2, + CPU_DEVICE = -1, +}; + /** * @brief Base class for layer. * Define necessary variables and functions for every layer. @@ -59,11 +65,6 @@ protected: LayerConfig config_; /// whether to use GPU bool useGpu_; - /// Paddle device ID, MKLDNN is -2, CPU is -1 - enum PADDLE_DEVICE_ID { - MKLDNN_DEVICE = -2, - CPU_DEVICE = -1, - }; /// Device Id. MKLDNN is -2, CPU is -1, and GPU is 0, 1, 2 ... 
int deviceId_; /// Input layers diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index 11e8527910..2f48e5b2d3 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -68,7 +68,7 @@ void MKLDNNTester::reset(const TestConfig& dnn, CHECK(dnnLayer_); // for comparison with Paddle reference results, // need manually add cpu device output for test - dnnLayer_->addOutputArgument(-1); + dnnLayer_->addOutputArgument(CPU_DEVICE); EXPECT_EQ(dataLayers_[DNN].size(), dataLayers_[REF].size()); EXPECT_EQ(parameters_[DNN].size(), parameters_[REF].size()); @@ -113,7 +113,8 @@ void MKLDNNTester::randomBotDatas() { void MKLDNNTester::randomTopDiffs() { refLayer_->getOutputGrad()->randomizeUniform(); - dnnLayer_->getOutput(-1).grad->copyFrom(*(refLayer_->getOutputGrad())); + dnnLayer_->getOutput(CPU_DEVICE) + .grad->copyFrom(*(refLayer_->getOutputGrad())); VLOG(lvl_) << "Random Backward Input, TopDiff: "; printMatrix(refLayer_->getOutputGrad()); } From 355e35fecd2866a1894c304647f6875cf15f7571 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 12 Sep 2017 10:12:33 +0800 Subject: [PATCH 40/69] fix paddle enforce check --- paddle/operators/pad_op.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index ef678cf3d3..449463c830 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -95,7 +95,6 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); - PADDLE_ENFORCE_NOT_NULL(x_grad, "Output(X@GRAD) should not be null"); x_grad->Resize(x_dims); } From 2b1450f1512753fa53717334a07b024efa8ffefa Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Mon, 11 Sep 2017 19:28:39 -0700 Subject: [PATCH 41/69] rewrite the document --- paddle/framework/backward.md | 60 ++++++++++++------ paddle/framework/images/duplicate_op2.graffle | Bin 2434 -> 2611 bytes paddle/framework/images/duplicate_op2.png | Bin 24393 -> 24748 bytes 3 files changed, 39 insertions(+), 21 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index c762811dfc..0859bf1d9b 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -2,9 +2,20 @@ ## Motivation -In Neural Network, the backpropagation algorithm follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. +In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. -## Backward Operator Registry +## Implementation + +In this design doc, we exported only one API for generating the backward pass. + +```c++ +std::unique_ptr Backward(const OperatorBase& forwardOp, + const std::unordered_set& no_grad_vars); +``` + +The implementation behind it can be divided into two parts. 
Namely, ** Backward Operator Creating** and **Backward Operator Building**. + +###Backward Operator Registry A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. @@ -25,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); `mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. -## Backward Opeartor Creating +###Backward Opeartor Creating Given a certain forward operator, we can get its corresponding backward operator by calling: @@ -43,40 +54,47 @@ The function `BuildGradOp` will sequentially execute following processes: 4. Building backward operator with `inputs`, `outputs` and forward operator's attributes. -## Backward Network Building - -A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and put them together. +###Backward Network Building -In our design, the network itself is also a kind of operator. So the operators contained by a big network may be some small network. - -given a forward network, it generates the backward network. We only care about the Gradients—`OutputGradients`, `InputGradients`. +A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and append them together one by one. There is some corner case need to process specially. 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. + when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NoGradient` operator 2. NetOp - when the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp. + In our design, the network itself is also a kind of operator(**NetOp**). So the operators contained by a big network may be some small network. When the input forward network is a NetOp, it needs to call the sub NetOp/Operators backward function recursively. During the process, we need to collect the `OutputGradients` name according to the forward NetOp. + +3. RnnOp + + RnnOp is a nested stepnet operator. Backward module need to recusively call `Backward` for every stepnet. + +4. Shared Variable **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable. -

-
+

+
+ +​ pic 1. Shared variable in operators. + +

- 1. Shared variable in operators. +​ Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. -

+

+
- Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. +​ pic 2. Replace shared variable's gradient with `Add` operator. -

-
+

- 2. Replace shared variable's gradient with `Add` operator. +​ Because our framework find variable accord to its name, we need rename the output links. We add a suffix of number represent its position in clockwise. -

+5. Part of Gradient is Zero. + In the whole graph, there is some case of that one operator's gradient is not needed, but its input's gradient is a dependency link of other operator, we need to fill a same shape gradient matrix in the position. In our implement, we insert a special `fillZeroLike` operator. -​ Then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it. +Follow these rules above, then collect the sub graph `OutputGradients`/`InputGradients` as the NetOp's and return it. diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle index ede3bca30ae17d5af52505fd94dc2f79b23b57e0..5cec3bc64dbd44dc99e348485969f29bd128ceb1 100644 GIT binary patch literal 2611 zcmV-33e5E%iwFP!000030PS3BQ`<@s{e1ZqTE1-6Zjk3CX_QO2*I*LJ65wJ3o0O}z z26=325t1Xx0g~dsZ;ynnHyA?5V@;(Z;W<6i8g-xP?qR(C$JK?WeUf412L7v6tfN&; z`c5!#{n4w{f-O^UqTCMhE;>lL4)9Yx5 zd%NA927RNt)kFI%n4%S&A-lfYc-`3CN^+wx?-A{5)T+49RAIPiqmiiPLT|bs18E&BU*6emB0%N<5h@s1q zwCA~y2t%Q6ep`>?5FU90G}MC&-yMa)bfO=?-J4Jhhn_UoTepxL8k@FGD5y^?(=SMCHfBj=x0B&#b4v2lo5H&BwI?&=Oyjf>x(v>< z)?9Pi-U-EJrdFod{y||O3>YjaG;AhK9s`SpkR8*%?t{^C|mA`xS|e0 z*jhu@8pRqmUNZP+Y5zpPT-F|jNNgdlqgDc+-Ty7>#ifb%CJ~-eeIQ| zE@iRTt|xoflZuD{k0@gkJM09`^g{Y^*;VIrM=FzdYrm0VlN#498f3|ba!zEynv4ON(Aq=SGa)a}Fq)fH-n}ZGpUu3%3%+0H{FR}HuIJ^4&YI_TpIKoUkwVV$&9}-UouT3n$vw?X za`b4)utpLWaJd;P-Y29t7K8jNtUh;{_L_LFrtcD=K26_c11MQ*Nrz%<=0g229^$_6 zBRBcanT#+oNZ$#ZK&U1Iv6V0VAsB3Y=JkiDKlEL=I%5$=GDg~t^ghWL*6MvZmA~Hr z)=~BW;NSb991z3kk3uPZb#vxTC0rBKKl<`&A|Mxg(G#LG=We%fiRXpKzEc%tBZ*fh-m>t_3MVdh^L|O`X3$jLF|sk zvGj++0hUE8E6Kw~1ZjO|h_p{4e5v*SZqw60-yF3&e`2_2Hw_D3I_0N8Emck^zR}zw zxhUrmg7qcs-A#M;lJ=Igx1_zNr#2)CA%ylyqUX&zXceC4`>&s?t zyO)0yqQ+e2Z@Xlb&6Gv%psXD!2F)tu2`$HHR*BITp+OVZ(BNSJALVCm9(Cv3gBX@h z5wQ(0H$pifM6EJRXSR(kLbzd2Vw0QFbY#+|ZWGH!2DeRSTO5Qf4jDJY1dN$t3lpshggK`zc1@OdL=;%uN>&BSBlJ3+hJIk ztNBzwp6JP0bAC=z**!TD&UHa>RzS%x;I8>pvtaWj>zy&WaJ@#AP@N*D?jRl)&}ovM zfA+f;7#Q%O?n-%+yDpjUS(pz!miaK|z;dvVa>kH>O>C#k2N8i`FlG@TB+8q}3uCMU zSy9X_f(_e9n2+fuA*SL!$}JPKMq$&|k%fTw2q%V(=q>XxMr_7xP7T}U_LBLQ%=aTR zpY>qoYfg2sNt6pvmFP0ad71HqiS8qlQTE_u)T&EHm2y>`x7^V2YBJ~V;?>Jc>sc}_ zo@QF@!Qke;{gVfH4uW+HQ>q)vcPs>Mgno`9QLPU#;}Z_~VjxN_ZWt=aX1@kYu^N_Af!;%dC{_*t6YRlo2*z8DNO&Qf zOEaCwjdMG(Ir6gLO(2dSi(!;9jLeGgQ4lAa)urgPes{iVMoU&+vhtFZe+*V$)|48O zRQnM%B}N}zQ!<%ua%v-!a)ZGV2(dY3ROgr)+~C+ks?^<77E%Q$gESY7sLlvgq)rGd zr^tGtnWl~nkRf27hQ*m-E{jIXqR|grG%~DZx%{c9jX#Fk9=%bBIfV08^G+Z$bw)X? 
zoeX0ONXmfV-^My694?gF*d+G<=VqRX2%))nl`YIxi%5o#kbHhVdq#3v63LSl^6w%N z^o@uFQy>l7LW)T^BobIv5p1Lr&JaX7rft~b%kW4H+-X#`3;y^8=8n=RRpAh~zs2@u{Y&IO{Z{0o_QKFJ zPqX#wOmpc}+?&uH4%vsGIN`~#qi3mkuBc-Ndl;FKY&24{_-|d;{G6>>|GQK*@ zr*elq>5p=e$ZP;I+s3dtB9stT8ZeQ6fP(R5FmV^c7o81MJa6)zsG<>C)>5W3Qj3)5SF z26u)AV_CR!#Y&s-hR&k}u*#8}U1q<#8N_jL@lJ%f5W1fOUwELm>z7QfI*=0#6qnZwwAqe74|d*q?@uw3|K9XI?Cu0@_SfcLyWQ<^ zdwg(=419L_)^kQ%@cZE47=IX_?VNaRXy4j4dOh;?9I)ZRVaZg)Qd=Jl0#x%K95 z_4nj3{+j7gd~#R+hY$;dg*?8d`|8f~ETl0p(GzkoEkrm+>Tl$&(%<9P464lHNUEdy zvc^__hWB!$_SL@Si|GZEZ_#~`Q&LyLdVq; zKJK}3m6y^}^@ZRemADXLoT#DBfu|u?`n3-WL|lomT9k$F$OpqGqA%&}Y3Foc;%X0rN?45<2z4BNLm*#AkVI2T)G>)~z1FwBQ_ zxip7Bnqwq??}w4vVzdaU`BDiLqZdk&Sv6(eel+#6b+Q707j!1>YcAGTvUL#EJUcjD z*Ar(uQh9eNGNH5-$lR`&M{-o)u7`ZDM*d4Dsylmg$cgZ#mG{tC!K`tO`hCOdM|m2m VP)2r5zG-D+<$oBADfa+I007jCD%=17 literal 2434 zcmV-|34Qh-iwFP!000030PS0CQ`234qrC$1GRZk|M9#3Ffj2#sw*g@!Nvb?7E-y&w)e z^|>dv*IsO0vAgY-C;2CKNbgv55c=$PtErwsArB=RuxJ}?HK(`JD$qCgsDviZ)0StI zXcSR(^I|KO5xnv?NZ1h9As<9yJZc=n-J6K^`vJ4IniEJ4jV-r9bP%7|mSvF~WUD!C zIu&6oMa%zl9LrhPG8(hyw0wtpSE|dC7#ObNO(q8>Cc!$*t;+d7zDax;a(KgyQdR93_x2-};;-qf*% z&6fuJ+S)%6{8XeGfeW(J_|lE3A4o!8?)c;0qcoCL;(J+7crlh zW^WKN7OI=eV9el}p#ITgw<8J@lAqzdcmO8Y^RRM21hC$SMt)D;nihh;7?b2;IqE`) zkz5?4F1nsTbT@$*MgjrZQCeGFESx|r8z8D(9)(nnBO3HRdg_7XgP~+$pL*cuXl*Td z*_b$5&+8+t<@@&!5$y7wL+lOBfvM0$Ah=~=AVrht%8#%r($Qb&NL-dIV}f|U*Qr4V zc|)B-Pg$HeTNz#NP3L&Lh>Fcm}D=w~M=^A{i}yrQEO9epo4`hKAk zavF|{UDre=b{$v8ikOTvkg^@qG_i$^UxTHL8kRcb;R2G%x07oK7aK~k$NfGSZ*{@P z$@4{mJP|RMxs|XSdD$=$$U&xK8M z%B=uM?T}uwpdE>PGkqGw$yU;0f>qr073lHgUBHZwDG%m~gz5yj;JzFd(CNNmAsDEe9#&NIO{j)EnQCme zfpwU59m7C2umxVoJT?I|%w-00Ovkkh#Y4I|4~6L`nP4V^e}(RCMy z%f!`Vutnen{ zemsJepx-7 zUg{>9s!r&D#Y-H^Kez_qE?Avp(Gu`~GdAjq;t}~-h?!1wDsd%#@Q}KC@rO1FQsfc6 zuCchJ8x2?CC-CpGo2Z?Ddn{f~jC0Oz9&C1=!GW}5Ea#hCE9BR?WQTbhC#uu~R}=4e z*v_Q_)NHM0*bW=fh!*x&YgTs>e2|E_7Y_29z*AX#AuGa!D#t>7I~=lbkc&j79gykn zrt2W06JnG)OzJ;EKzSoZd?|d<*+9grdEQfXws&+8GWi&M4BpcjGA2ugKl6%r!sq=- ziq`zJ7Izcb8o@7-{?ah&(^}Go*kkfqj)Y{(zszyOSttwYtxt`bqQO{}Zd|d_4vf%M zyaZM`@_d&$>g)(9#q~QH<+7aepJmgg`+=ac=+KjU0~eh)BdfHBeG&1`BBTMx&BKz( zRXg&8ueiLXqn(!g?C-w~-d|wE{<9N&IM^30Yjr*X=JjoGv-@VT_=l`7zeYWZPZqU*0Qk>zIq^K8q!#pG!k;KY(yLfITn07yTy;^nogBjoHCU`$!cu%XBd}LwcY-l1Jf0w z=V-pjsmVYbhfMkOoLJx#KA4Zg;O=2(KNUC2CK3X4sVu^TkGot}c_~eHECdf}jcXdo zL=2Ulmxf$vHtrG;S5jF`$~5%YBkP`p{NtEqQTT$lKa1I3D0#FgDw9}Rh)~hviWebVi@Jay4+_DfzO;YLlGn0RjX-cO|Gws z(F;qG*=owH{B#^-^JE1819V9rDlV?CWb+`Zd3SKSPCzgBney&ZWTNY(KxQ&yp0YuK zyB?G7oX-WW?!wI==QJ2s#$oOXW{qpq@9|Y1SfZtyGA9Pozp_e}Z0H0>8 A_y7O^ diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png index 4e872dc2caf3b0cbd0d5176f11a14801b538dc86..21cdd5cabf1b5203e1435a75b57770d2f702fa92 100644 GIT binary patch literal 24748 zcmZ_01ymeimnet_cXw-oySoz_C%6Q6cXx*X!6A5XcMVR^;1GhlySr><{@HnN_MGL= zO?TD(?kCr!h*DORK|v%$gn)oRk^Lm83IPEb0lfO)p@1tfhlzKfgNQm!Q_yE8kD>W@wEd_ag69+qHBU1-sGiFaaM*tcELeP^R_-JS5 zYDDg7XKU}m?rxgSlIaZ_*hukS=iZ`00<@*FMC%bPbPa8 zs{bJQfAL6~xtKUxIl5Xo*pvUmYh>);<|;%<`ER2C`T5T}U9HUj&rJ3%|Gh0>gDn5< zu&^<+viuKjV5s1~tNaoUc8<-?EgLb{~zN2i%ZGb$_&``e~o7Q@6rGFwf}|}Wcj!8|LY+BbISj96*y*L zL_wDSA(=2@?Kb-=1cWGrtfZKlC*-LC@ek$9lCtp1&$9RLuh z9kmX;V<_W6je(EqKiKpG0At`6)PIw=qZ?|drU_{g{hM85%j4e~=E5Kv65CIDmjD2W z2Fz|~UwdZVT_~GWu3j2puW zW$o$dxiDAT;;;^2bsJ5l`#17W#owMFXg^0!SD53)>bxQ`gKc!`@&hSj3f9 zCupi>kX-6qwf?dO7>biNIG7fd0v$F8y+U`m)o4O0@EJTYuw2w66t#m41A|knnnjAg| z9cdE;P8O9q3#MzCWrKG#7-4HImp>X!VPFg3^tn4}cRS`+Ylxnw`*jQW1Y*8!tzyPr1UKSmoRFHxUr=8#flSyiDRO zWg1mFKiyBAQ9=3kYN1E$7875~w@drT@F9=U4cG0D9cRp$uSv7OA3lu3#+ 
zdcrUK%hk#fIV$$rT>}AJu7;Ac3O&(8?M&%gPB(gqheuJYLvvNSEha;8m*rV}k-V$a z0Kd@~O-mRTNXORFa(mxc;&Ja))O7I!yE*z(po%9uT2gvxOAhv=5}i6-b9U5O@+ZrN&*P{Sz?UZl;bB8lwj~|${|K~m0p|k{n=^*6>pS8kf>mBrFH`;uVWyU7C*Ly zAN+n-qVNfA6I75rhPtXH0u71GiVpg)xq$Hj2Klyio#X0AO@S)V2CGs}f(f)usn2EP@%A$@-RdeylBTZIo2(Qc}{HHHMaFVAf1O)}v%hg7Qoxfre z5B{#)!MAh+rD)5F?8zsFYO39uD0tW-G$i_0*1YAw^MjRDMIv3~i2q49K!h+g-%W!q z-^1wdRaIC=Yz*2+sYLrGzosF@kFP+|PFN_ZTaAE|dzan}G zjQj3!|8Xd+W8)M2M{hhvEyvBiuoQZAXD^}$Nr$vdQ5Qr<13*6Hw2a8e$Xu14ZjbRu zNU|O6W%dH0{GRUyoYs^_5^ig4_5axo2A9!TD?mX4!U2auNL|vp1BWOY%Wb#RTIgP_ zRseqiuUHT+r)8H#gNcnDI#2_s6q>AMNIq`cg+MBocruY3VL$H=kroN<|MXfr7gH<5 zXE{e~2o~P%gmgvmKgxl0Baa(4oziCvR;$4X2i<^jX4Y@txEZCCil+Xi-ZoUCl*_Es zco2=l@SW4@N0wUZ4)Z?-T%U2gtp*hFTBi?#M&&{1{0|#kOw4-A=^x*@Z20r}tHlHU zEiQnY%N}C^ay1oAh?jgN6Gev}tGDmTNB(^7&3&~)C@*ln?Z^}fEp zKVNCPZgbh+k^JdE4)X)u2-El!w-1&8QsUo4C%KRob9e+|p9As9G(qih=lC_9T&;A$4#p=JnwXbsF}cVFXksU`1Q(R|#J*iHWn=Oa|{K4;Si8 zWRqx^zI3!^IlsTXk_fnyuYhRc(T5R8We;dCn{^4ux>R5Q9VTU{7V$0n`0%fo>t5Zw?HeB7>-<iE~^&_Ns0%JHi+kGvsa31v!uuqgbNTvJ_JDZz9 z(trU*Es+Zj5momUk7B4g;dcAzl9_tcn9svJ+j?%fYD~$o3HiOTAtx~q!}Eh?zIfP_ z8zx0Rd9~}|O!zi$w)n`nDiYNETWGnI!Mud@#x(Skfk?S#Ih>rW4%R&32mdklm$4{F+^;o z>~o3y?k=aas28xGEMcxVALzcknUCIvx+MebAkyQxnGBU>D#5!k8pGMf2X0vLBTK6 z_zOC1g+4mJgkmTo1JoGEbB)b=kvd3nP|ze5ldp^vmB%$WJ`gI&8KGL z9?j8Tu)OK-}T; zU;dkipgcVtk?&nm^$$q*`dx(xa-pZK&1mbjnLG1Uy20p_$m_p|9$B;yRzl+!ggiF; zVEM8g@F52P`=m;&I)vm)ES+sLduk)D@I{!@i)oh$U)Sc;yzvAbhaMx-cy%>}LGza) z`qumN<0AnMIF&Jj_>l+$>KB1if#6F5hVp-e!S%fQhi}6wG2i+g_nrnQ6kOU znp!8YcrIH6iUy|3z&UNyVY(GKg&yb3pxfVXPyQ=`Lo1VNGjVUpo5 zvh;4oiJm&tZ4CoA0|*A47+WD`QWxkLNysqqn;6W|1z3@m5US?*PM?-#0=&{ z6!42tr}R`zEm?H9G|O9u{?6Iu1%ro4K(1lFzP^ldgL8^ks8S60rX|S!;u|aYI@W?5_SzJ=QXjFzm0B6pd z?Ec8Y3ZMGQrn(xtJpD~1`42SFdTlXWMpsu?2D@3kG_UjMv1_`1U-%aruoJ0t4;bK! zN|;fpMMt#}vi_oW8=jH`4m(6ftJs;VK_1uSN|8>?UV1>>P#-rpH=Vyi+LUh#eiaP{ zP9FrHaLhjTDiJxz^hN!6b^#U}>yxfn7%L1`0gcfr6A-G*%MJP`2g8E?jsrHt1mDt6 zdtwA$3y6(I*oCWo+FkikJ5tJ7$mF} ze*=*L5-^`O#E@IO##sJ&-31FwXdx!f!KC^Q-HF)eyXeTsZ~NuDUX;fbqW6fiYsUVy zn1$K~sV?kLxPa?a4-pqbTBT3hN+UHSPk~3L@cD02d<~UaFD{nT55l+vtJ&hF0>qoz zUp;FJqC-KQrx4#_(j|daQZrV|tvsNi*eabH$1O0UQ^P=iGGk+J>HHoXzZ2`A!4LDR zX4DZ4aRmu*W^hH&N?4(qDFNy+7{`ruyW#jPhOZ5d5%05rIa(Glw&uaFtnL!>n5NRc zn8XkkW@QhH^`;}i5+As*Q9=T@)A$1uDXG&*D2s zB!Q|OkaUe|{7LEco4NTx*b^P=YQ$b!?;D0Fts0yd4*bcYq~CYhI(wJh676N$kSR!~ zsj&RiPh|MPpw>$!F%s5~ARin@!*iI;Eo1~>oyp{qag%}9G(OztW2U3>-UQnmy5JoP-l^z ziT?{P)>Hy86i1&74C!0vs$ll z#zAx}Rf;2gk)QZhT5v_nK}6JnQ@xA%i)~gPJar^QgK983A7g*e>5UatG&XKu=(U5G z>{}a&hIXZ)SB`F3Tp}&u@)jaqD&;kB{u(QnM7j*?Y)~3NEF(5>j@Lk&-9|ou;v(m`{a0Kx}OSm!6NGEkd zqBCLchcZh)r%)ql1k#01!93i>?c0K%&q7Yrq8hpfSm{Pb%w_F^00$at92wlFmuSR7 zm!^(kg`n|oN%oPg&ZFj3&Doch8F~8D zdY2!vNnt)aP!FtL75TP2NdH0bLRW7V*?oBr@ysPiN5j zlgbqK$YE{ss$2nn<*zn(8MR;RZsGqTCFFK9(5)TwfZP=tI_=2ivb{nx3@R9P>A}37 zzd&-#s0f^b7`1@wu{TCgPy$#p)+k_+WpG(?5!**Vj~?@dNSuX^IT!W_?Vm4c*KRI$;soJ^2oK#dcK6{(sl)sr zb6D{!>g|4WrM0s1w{%D&`u@H?MvJ`?AfuG%eBLwKRu@6D%OvmO&dWb#7hL`u6;ivw z9LMA>DafaG^K1 zw>a^(a>=c~aKcfq&@mh2j1Pi;MeWQ0(T+CVbg}*g-s==}Qd@AII4BGYOzahxd3!W^ zy`s4AU>>UxhKM7Mt-mA5D*w3f#Rk!t)B|8AIw#y*j)d^LWZE)Ig6~a#oJv-g3TffM z#^E!!QqppK4C80n388GIG1vCrg7>^0@>w*$J*-en+lHGUL zDD)Cj6|Woti;1kQX(}+dMN{ML<#AwppOTVtB3Hzr2A{da1(2x|u8ZVp&MUbx$1*Ud zA660Aq=qr<_oe{LB<)Y%oaI`(gI1#@U3o|bF{Sjr>!BKM8dT$Rsy?lNzCpiwlN8&Q$p7vnC)* z452_uhf}JP`=%pB*J(?4{ zD>9J+0u$;1al%=$-*jOd!Td`wuyB%FX~fW=57`gqNy2;VCe3ofZE4UxnGz#*vl%Is ztU?pYL`yMy^55#`?G>?7DcK`&K-t++XS6_mL0>P#qZv-)@5gg7F;SVs&npR8}F5bf5F{E@7z z1N(ozLnH6}-|jPcxnghzVM~F*zDm6*?(fzU3R+p$^ub;kUiuj`Z!LYcySzx!oJhHk z_g9t7;{&9~Q=yF@G$xbSIgjP3ink6ZXCK%7ZTR(nmXV6^fjtICRfdlw 
ztHA+x`tfI(Je(UQI^9|`g;E|*>+8wXQO7tYDEN?_REB$L2Iu37d44qpNGkSbVC&@= zIy%T-4{ZO!iWSZ12s5>0N8Fxf^_dXV+-Z;Qf#1InfGqF^Ex>kLltehPote|sZmQO4 zkeml;t$YtyK<=3r7vK?SUn;87zY&cV1<7;UL`Ubf1O_42kEuXb85!UP@A$!rw7$dT z=ax^oKChy=C;{(Z-z|V#_CDZgVu#%R#=DV~c=CvV@$KH6d-hX<*k25`F$V5K0;qrB zO_uri@ptHYQ+2&$2tR&bM+K7BjyO>E=L8pmS?zyfiwbM}r(r-<+Afy`slP8~#xeuS zZCaXIt(JVP-k<^qx};w}*eWER?873NP*rq#-||b1s`Q7SueDwxl5WB&IdNRxON7Hd z=3qE7L7k0*Cv#QOmRwD1OhX(7RsKR&H$|j0p}jkuCwCuHT$fIN@XysE;!&^I>2Q6# z?2lkELs<6EyZ`jv^!e`Cc{JdC)$esr|C8fcqm|G#96THS>x|FtXok-EM7}ig+f|Y1 z__|*=FdYw{%lp|O7h?R*udtYjz?#V%zhPu%ldVP~9`hm#rP9w#{QjbSPtOQ*hb|?X zIm6NRtKQt^C*9|3QtRzFZ_5ww^X#9BkdJS_Blz@_&Dhc5e8+6mboHK;w(GX>P3lrIj?b|Q_;%(LFab2bWABKy3r<9fA@f0H!zO?IxUCOCDW^Vxjy zisM3#;rA~Gho+4-&)WoczrjmmvCkd8(k0hKF+o~d=~KOnwVTdW{796PYd0rN6b}9r zlD&V|Z6m&YKr{X+Qs0Qj@`c5~iL~94iX6SVwWQ?FF_qiAYnH_>P09O%`-76(|} zjiu&S@BCo@Jo+2v0qhr8jXxC#lII%#*^>PTScz1n)d@LMG8Jb2*Mo6Nx}jFj1pAOTrxF+bbUI zlE#<;(qBpza7Q%Yw(VbQId^xw)oYzhpt@aKCCO7VG<@>&;V%U5j$XJ)gr7atE~;9B z3|8A~q$q{MBV_JQTSi3oCBL>g?>?IRQku~cNI;I8K8s1KQOas%nU55+|K!O0lJuNX zil-D30$aHf-D`%`dN=qwzd;&-knDPQs7YW-Syj^c5X(_XyZx(-nBhe(!bM0XsVa* zpvT0l6JK^&rx3qCJF&Dw7}D0xSiFwXD10x}X(E1@_giT`$lySmu0yP5et9cVWSUlf zx~Y!8AW{?LfYZif4uyq#5^MRWL}@<#e4|v5vN;UT&J| zeS0;E+~xI~tC5h8iKDgT3iV%S+ifF*e5C?WC=q zAOVMUDI#{yrA9g+mPCZRzg0JB6loM!O3S=WAr&f&+#KYhy^7f zo6y4cORhNE#ZR}_TmENRgYP6KUo%UTsG4ot+RcueN@>e@LM3`X1Q=)+Qkz~}UrU$3 zVTbtGFv~MV=?6aF8)ON;)=Gb}Pew$TU~-3KrM|qC{nBj9Z6nFYfo9md;4&~8V_bAV zA1q18IgF&`D3TZONY+)eQmGvz^6}4j^7QlYw2v`&Mz6)VZ74SDLu^IoIGbmi^CCJs zJ~HzR`lmL@sIe_#V(G=e3h&#j11sZsvRT=0aFs6&C8N0_LT3pC$)QB7yq_^09sA5q ziOylviNC5`?4Z*2SsR(6RF8_l!?FPA`!pdDgM zGD-`G3%(v08(PbbuXElou$}v#uensh8D4!EJX#MpHyow2d#Zy)Qp(|aHvP^io-5Ub z*}rk18C*Fn2Za*UDwSbKiOwJ1qfEoAg@1IliwUz5lG1nf`Ps1D6CB7}_TiO1;(MbD z)Qa6QcCXID(`_1tC7%ArxkwPVG8_$9T--UPv+#FXE^R47m+b+Rp-P!_>`J|s?;~AU~2^5u#cj3m*{?7QvL|6nwZE zy+yKM*4J*6Arz-lq&kY;HZ+p<7{%Rdyc+eGloel+^;SrRphW)AsUmzli-bhNCbu{# zHsjI6f%^s)ZXO%L$T(#Bfv*VCh`KHM{f+SLa3+JKdQX#W7L$~0BZLM;K?ZgVD%Rg+ zCj%cc2Cyrcjk;d?24t?zMP?+#ovYRzK?>G@6UULsaIoY$mfd8l0w2PQj#b;Yfbw** z=7M#?YHvFp4ExGzhc@I&UYwkh-oT*4xxG^+lPeqxBN1gR@<2{B#(9(m-R4p>lC6vX z{_jL@Ccwrlvc=R{uv4|Mvw?$XKF1ni)3Ut=b*hE(_y#t|^T+-f{@0U_&t0!Asm%87 z`xDo9B$`8ikB*u?4k@kh91@_5G^7V-U`%3@h9Nx$q3jB7P5S>S=c~-+P5aG%TC8x~ zr`z)7*<#U)fZ>5N?=Gc+xo4NqWd{GTPY#Kg>B*OX&KxgfA3{4Ms`h{ee0?LYQD=PT zcFaQL+^!<^GK~O#u*4x+SoqrlhIm?zkR$h~#TBYJT>XWeALbqTGrtF-^=v!5AW?&b z5(stcZ1pgW24!MhG5pr2KZ~5PKYo5i!tmGcny=2rOmt7 z{BvbkQ^o3=NZ@j1oF^dL77M0 z*}OWRhx8FPQmVTUu4)dB=lEYQ0s>_oo(ASND2fOyDqCP7Fv~(GmdpqJ4^w;~vliV; z8T!H9iAw%rU-NRt4&%AqanN2C;EfuBpqB16#ZOKrnht-7<@e@M!+f+_njRgr0WBBF zrNxor@bb7F%>lpD(Mxet*SE)ikLRmm`pS+af%^Jzu1GhPwNXa3l<2k}i1_5SV z;0o7mZyYhv%*s%!FyV2_cMXK#njzJnYtcRV+!PO^t8FeabI}nF^=>b(ud^k1eSf=v z_JX;x^jhsJj;?88C_>XB9*2YL)JB_57-7_nvzM?t6r*-nAV1o>NbQ+Q_nAiCEpuYW zzic5}x5Lw^ZuM`CLBN&d$gtPhhms?mN-hm?wV@jRsreMlZ+_vHTM+ioV{vg1TC$9p zEC#~Qp$AQBgI@_6%zo1}pZ$tT?-}y8D}Qd1`&=3NrGG=o@)T-Be)WUOdcHS%#&q-y z&>{d-Y^Oz8SXds3N=xo&>Y38@F(tl-hKH~H{Fv>R_$fnA4+Y*bI>rAjF;zZA`MDJh z1-}dq24~JlX)KH-o!2^Pzm2V+usu>7k$d@z3jWOA2$iJ7P}o@_{P%~1cWXk()5^n_ zJLvHzg}!#Hk55Ly#ZEkBFPS_Jq{6=R9Xb^(;%P@Ds)u zLnlyEJ)A9R#Sr=^s>EZz0+9%?l)Hsv`4*ex|$ z?N4NHB@$=RRAVft0FMDzVg-sTuOI{om0@1zX)umt6KI zK(xPB*?St)zT`=geYO`KbYl6^@lE6^HXL1! 
z-zBOQ3%KDTcTR<_F z^G7K>WK-6obgOG9A*92eX;FgJpxrK(;( z6Z>1-H*sGnY|m^AJjgf4Z z-fZNc51bl(t*|d|PX&}*x2vqU^0 z&QtkW5tZ_TG7P9LN7dkq>;9e&=3!WU~IpP)62a0MQC<=^e5JRAISfN z=7ZTQLQ>3QOER{pCHZ}&^}Ey%_$SnWNQ4*E%N0d8f_j0Ghl%78zIo}{w~UIf|3wnE zveAf+LNdzpkG(oug(;~B{h()34EYlu!uooSZqZvg_*{NhJxA&K49p5hGPKx zng)K2^5fynMqnl7bKU`3gZ8vWzy%8>5NTj)C}yFet5Gj&{e&E0%q4W?Z@4E#jfYr^ zq$$`#T~p(kJSHNiKobG$EF=^ZN|Y(}4Axt4Igm0V1ezN1#6onMtT&&bHLh-M)MT+- z^Wd_&>*T6|ddn7LD~a@`BW<-8O6}_?*WZGOX|5~j4~Z8w#;FIHn)(IdGfi7Et@=OR z)0axM8}?eAw$W)u#8Et>1&59$NKHAy_crEimDjB-=M9&A&f-ByGl^l zVk;1>9{LsC%E9kLMQX;J%@uXSr7w3x!)v2W3We6Mw>XgPHH=#{ZX)c65}Jy$|L9s$ zPHY$imOA|uv!SQKf5`$QWd~9#l|#Z6XE=THkQu9k1X3Bbf1DVj6pGPMo4iIwjTEPa z#u{Vk9gUqxP6kd1%yl3OwK70JiUdkMbCC?ATv}JCSA?4++?}l+^UfL*N9>*gT?>Vu z68fB)4y4(Mq{xb0aT%nfRDN2!z61v9=!obze{=+9R&5mKxf$51CNl)7LQ zq$BazI$2)}L`d#vIyKIWJ?wy@$ACfR_VE{mBwhkq6g+%h5M9%jIr+i~699WMWQv=2 zV96Z~>&SUOd`ZIlD40N|paUa)oza4FOZ$*;vI+AnC z%YQFCy$#}3P`!aBl2YN-XB_LT3?(=-FDfEmkpj;K3+aeVp%4lGPNsv_l(jwcC_W?= z@(w*%1q!PZNkdFO(w9S-hz4j{=W-WK4yN+sW-vKcR*$Fuq;G$jH7nHCrOW=@JrZZB z+3o*&1eoobFG4c#E#z389G-A**OMaS-?X%2>0|ogZsu2jJv|;KzCg%Xx9J-|903;vK$p^%0f~5!QRXDAAk*o#6Qzmi?$wZQ@?Ph}bZyNG~ z!bvH#F#9Oll#L{f#m8vy==1u@7isA^!IbLGJ@99L40ULtE!U8&(B|oId;y@ zhZa9>*F#C6?U7cAqDWSkP|;}j3iE%EoT-@}nyXBzZ*SN5?ui~~<0g8FUkkJ-HA4|e zg^MfkjMmK@|Gkm2@xGK7{b(Uf_*9Z(D6ZQBYY3CZgIJC5K>xq6@@WuWqh!A*DKmQ^ zKG|VB7OQHgq{%_!l?J8cK{q76u-B@X(%!fhpfj8yAI zeXztd5W1H)Dma)q1J}-6I|dW+NFhPM^8AQ97fQK-O$XDsP6-%lB(5`ZM=h%*%kRY&AUebn?`M2H3k;2h;|qs)t-m#sF}6YWLh zsk`Olre|+eom@K!Q+M)12#K-v9OCg&7HH|&LhG%m<5D&TU-fycPBgA1tfzFechx{l z9%n1OfkbP6nCB3iBqD&`1-zrYa)7?&>=CM?z=%PAxfbJ>ub)e1a~VW^BZ;`I8@+Fj z9u5`doE~#nN}A@Lq8DiAO^Q;oq6~}reXJ%V%d*#FOTIoC;5$F=Y1xj4q3U?GWEFfQ z8sOsM1mvW?5^xMgcZ^yP2)Nqm;XjK$_bZAyO#L zs)KNVPGkmJ+Jm{W&x*^qA0|fteFk*yuB@*cfXGV~P`IO)7!&dRY;!fA9%JrN!3wM2 z5(BwPjfTHbxE;?A2-+CuwbV0Oi|Lmp(2YPD+hQYaSe_-s`V}?B9Q?-Dl!bxQ@(2gi zrd~*jYI=O5CZa6tZmkM7%xu^XTO*Js^c>SCk7}eN_vdK1IOqH|<{Ex_`f|Wz0v*>> z(qA33*l`#%LNETeX8u^*xQTpqXi@+z`=CkT;`+crgm}bR(aMDS>+)<>U?3AdW8MFm z=?i<7M{%no36g2SK~Jm3Bm>1>dMhl&Ct@pRhy;{4wtH(H{m+Lo_08WS@E4a+pUz zBgfT$gW&sS{@H25789@>0DFD2#EIf&VQ@xVn=1?N zsmb}$xXoYW@hTg8^wCFN(_yh5Hb`d4E?b>TdyWllz4tMA6Dba9R=O447LQ{Qelz?+ z6orTz2x^L3T3Vi#i>y%03dld<#*s^ZnzRK{u1Esf8;hM@kMvc(`~85b^Q$rhWnDGf zC*++C`B`2(2ZHHU40f$o+QfR>^0tZ2)odarFh=0>i+dFwVh*a|i%PF>57dQ-VdGpp z42Yk3Oe`w+%Ft^$dh_C8&ER&1_|Lst^wYXT)*HUpEX(la8kJHU z+*T46Ei+eJR@Pc-mQG!ijd#hqT>9g&3YN`2zn!BJp<>uTi?+k>?y^!~1m-2*GSndK zoJ6onLiy}?EDks_MPodv$7*FFR;9k;@O*80Z&!8Xs@FYgLmX9_eAfr2Typ}0QKBmKm0iMnvyWY{1jA4Uwtll^ErcBWVY>i>juj+d)50eN=Qe^$=FN1c* z-(X(-8wv`yn<+C@7UcC{4k?}#PZ}}(9d;(Em_)&RQ&OFr{C#Dus~FKw3xV>w5@>Jx z_?|>wt`|{Pj{+2_Y3apjRVx{BSS)n4Cr;&IuCTvZ|Ln1GocOh>s=udwc*16N%j9D} znl+w?>~9_l0q?g)rmFL^OB(I5pdEc!Q)_0QvLR82(Ih7&?ljGnQC*g4)LJTD-KFRO zk)U4rHC1PNN#T|epPK9CMV3cO)Nh=75-f8syTs10Xv)nCBToQIIe6e3A!Xx6*c?-o z$WxXrD|Z=!Z{C)Jh1W6!oiQw$<=)B_J|LFOU=C^4B8iva_MJ!$R*J{6^nv>Mn!KyW zV4%#k4o9fkeI$m@b|r$?m>+3IomB;4eiR(m+#7WW+)%C0T{=4kKcUm=NY5Pp>OT5B zLBhq}4~{4OWN11%*u~A=fv2l66PA96;9zick%0osSAm29-}e~K_V+S`c`mpj6@wK3 zCj;}%n0;%wQs{$!c}+?#v10*e3U9t-Nua>P_>o)nRu;m!q~B_KW73ojRho8Y-`S-I zcA!U=%LN?&c2dNnQtqg1Jl}aih>k>v-!u+e>GVEhpu2~Ye^*buRl&0&3vsU3PK3tj zxiJtG6MI|caeaz}W-y+fr}&1>;It!q>{wWVXL&D8!Q$@Yz89C6Qd&lnc@n^KZlN4& zZC@Hmppod2)D^p!eH`Aiuw>esA)rHxz4Fa@r>==}--8Y6ihZR_f_Xl)f=8ex6dr90 z2X~Yf^c8}JDs?=$zGD$5P)t6Q{Jwvn8TVXA zi6sX?%K3xp!1v~q-4$z5h?tf3;;CMir~fMD7t6b~*jny(u3ru%~B5FvV-`)hW(@wmVx9wuvxxG;H*lq za8!mCwP4f1B9n^bJ$tOeB0n!7e8jWO?@IxFIG?CP6IV|jP5rF8y~d<|ussM)Hv*xd z(d%mmo1N3}3=TyXlcltwSg0^SIV*$BL7JJv$P8jtqfMYyNgiTNhVr$ifmnwr 
zrM1?)FZ$XX*YJkFOFW}2MAIjSm55^ly)??z#I|e42SLU3j)c`yOIE-17bN;vyoNPj}hEB2H#a ze3dyV(P0!~RchYEycz4dCO9#4=M2+$`}#l6VSPqtodlP5DJaQ)AhyHx9-@141V}@* zaRz9FqM4LW$V;#MiRNG3JW5+%|6Rw1VXVjGJP`h*YBIfkb5r+OZ3_%MZq-wT@hLK; z7FyV0EJ&LrcXrIl9tDRd2dIE&1&adOk?;J>K=%8FXuiwGtRFaAEC#p%WBR=@ zK;$R3R0uyNYWQvpFaRGig2E7%fdXn}k^{c3)M=BtTO?dSo}d!}HU4YIE2DXg-b(5! zN&xB8i_}WvFaP%Ym&a|*8cc?;-&F1ft~a+Im8jM_*r2G6PL~RZXieMhr%pWfv~{Fnvbo-Giqxl_ufw*ya7c}NJr;Z&q(!Y4cJ zoOM=L%$r}{4#>rOT;`^ZvwQmd&2G6^N-F81Q|<7&XHJU0Us&RbF&~W_d$wOIXE&Kx zV=&!oYPurVAbOdWGq&veOaa8vSpsa79*s_j>qvfel>R zc+RPpn%dL!_I#t2ihu)z<#52tdD@fzQHtQ>PQT21r(cQIo_tGASif|EHJlI7cKQ=} zq0w#VmgsS|M8NI67mqCYSfza@NVmmiQXl`|W0q+MvH}tuYMF)<3^tsCxa@oYV_4HA zAzS;WuVM+O+s0B)nzXr`vWc2C254(_M8#v-+~Xrt&#b%qWXTdv>CW?)NyCdjZOh`s z2Jo_lIW{-)3B8;nx;!sE`n~OqSR3kSw_8XlaOH*q;~+Q<8jEZ3bI%_ZZ!$6Ou6C;~ zF1GXU3+UR#<4$DGSFr-N+}B$lDd~@_LTeR_hMJC>psqGjj+*SZmAJ~)M>lgP7*Jz-)JDox#Rt3nO3vG@m!-hz{O@Om} z9~Sb!I2W35O&*Ig!|sJLcpdezc+95nFF5KfwsK6szEJ7J+L`QLh9 zj%9G`cSS#Au@ypqb)5HX7b-MG!qwz*IIaFBiotR`psaNv5zr97pcOhc%A|pqzLOj* zVU&LHdx1G5d$0)MyFiOVQpy%XLy9_3>h!~VFSNudh&TyL-QhF`b%J)w_0j91T$RSl+DeyKv=F$NBxC2uxQm?KeL%+l?k~CaBN}3PziahPFKR#-x=A z2@%2VExX_Hw{A!tgj~HXrsfK~t87Ajjw9~BhZOoDjg)r7r0Ko;81s7~yN2M~Pqlf* zA7f`l;J^^m$1R05*TX!_vrB2xb{`L|TY7#3q77M5lQU5fWecimFnQH(gZTgO*kg&HrKXN zI>yNOhWOk{K@)CdO^3A~e;w9?v87MebnW(56w+#7Ub${>Uhju0F?a9f0&YiUPs&uo zgafpxl$c+lMJI|GG-dw4uX(+~on5xL%PYB`Ma>@kX@0YSWe;dFBZ7aOG3h1Ro0O25 z=1>SG#lx7=eVLB%ByeOgpi2TtylZmKHWmy`Q&{UZUd&Mq?ILf*Dn(<9gsElvTwxEM zvwZ26UUwg!Smm27tQK&-1t@0dYsf@0|jhBe8fp zzWK#=Z%l4GZK@>J7`2p7{#E!#x5nqIoh9)Su{U4Od^Vcz0au(|%lo9^P4ub}JtMCh z;db#>2FI44kT(xD<`mlc{N{E9i@{l^6H)<(BW{-*sNPgsyCYfTbTNX+v^t3ef(QT1 z?BXuk_xWz7U`uR(}tS1lmc_-SypI_n7dxM^@i;r5Zx=v37B?+LzDoMu9Gm(kHw#u z-R<_W1)Hkv7+$E=a=GxNf2H|8wjnL^H%cL+IB`w?sf41O}!eG?e`8cXR)%%v&l7QNNsl-G)A$+|MRd6rvG)v1$ zW{aq>^wI#8B+io~RFODKwkq9AC{Gs;8B5-`glkM4T|aBqUtGtZ4u|3jc!G*I@i4mGT6@pco7L)AS}=II zu671%&IaW)k9U!J0?#&11@OC!5}_6^_BM-##-OYpjh?IFg1q*#1z%M*gE!!vu85Kp zR>5$aWa)C)%Qbo#PHxtUrMeY*&6V%JTo=pTE)bxonsuq(2rvNO5Kd_tKRH3_K#)nN&g_J^J>iD$Q5Qc&Kv{S4KJT zF2TPDC1t#|!759!n?cmJ%@&GHwz!M-0K&vd@PS!0A+p8ctlKk4@ZlBWSui#dhKiiT zZ8FAnGc{)?&d=#EMsqmOa4g;%4!OU_2dNX>~rvP_B0jSWcH#KKnK%q(I1DPt19w z!%U6xaa!j!_w)%w{PkFvSWUkXQ-X1UQFq>bs4(hO6dM=__w=T|jy%lW24Pvm$aAAZ zv)z<40}m%3?0?8;n<>uldNDms?^6HW2Yb1;jWXx~_ZjV>bNt{mc6~Ed3XzG=1@#)$ zbNce9_fxNgtDWSg|JoOPsErS863T3>gfwET@s6!Bc=94f^$6=^h4|%=aDvaQ$iz2~ zAI$F8+heQWYO{SV+_Jr1Bu6SU2I|6%{7F_?7`|Zp#s-{50+1c+<~ zTUpg*Y|wdeGx_&lruiu>LO_t3{`+45F2PoN-j8AH@$HXLgCq|%rnm@fuCW}?Ph-69 z#@o9LJ-EnP^c7PrQX)6cb^=aG914WUu`T*PnWG0JaVo)R+iO?n5IHeN8IE{#5o) zw!@^6<>!lRpEOdTM6U1tCot`oo+CTh{Y0!%`CkcGhZt$*$B6>Z5!SeU<#!LG+#26c zmy)R5tjEkYKGs=RXMOr5(ot5GT0Ok89kRYAPVh2nHu?XwcUDnRMe!bn0cDWxh9LzR zS{mt=0SO0?knWU{t^uSQN$HZ3?(R^A5a~u5Nol-?d)K{d-S_)=-}b|-nKOI;&yF+S z?;9IdqRX9!fCFy&XUtm)J1h|9_c!PMcOK-4nrw-lSDv0#_EUrhq4e~ArYCYeRU(G2 zQMmKOBqClntw=XHvW<(@M)I@{-%HMR?GcN}e@fDb2ZGSeb%xKgdp(IPjt(pG)X>2} z)beTnDR@-t3D|%sYPI7^^4*((lx6+^DYvK(w6f-qB&VK=iZq7IM2<<8Fs;D}*rj`|i=tq)#tG(<<69+D&Nji57Gus%sSO z1W4X-z?)^4<`!Pdj)PxL*MwS0^OoS=LTTfDpMnA-`RaQ3^+>E%hIo zGoz~G0hsW#<)`Vc#D5z5%C2V-EeoUbJb))+ohcOEKRj zeHd+V|7fA;2Q#H7BO7M2k%+>{Yv*xV&FrG{4a;*sH>G}i%b$-gi3YQFD_dz-;8{O7 zoZ`i$3t58dze);nmR0wcKLW%hRHz|3n9+Uotn4vwvM%>`EMt&ig;s|QZi3nMrod_F zC%sXXWk5*iSca^GzY_R@!!TOB$Z?P$2JF$ee{5*;-e5K>7%MVahhlYf3v86ur0z^O zo?EUTR#ZvmbA7Er0UEhT-VF6!j3oV2oOxY<_<2@hNZ-T8cI3VW4Xqkfj5ZWYOHpwV zMA7D;=M}YnD4vo0gzunNJ=<-K89-*TlpjAq^8zwD%*FfA5hO!@tvV(g1M_ z%1edQ+^BE3C)63Y@AmmeC|O9kL%4Sp=Ih0a^a%rjNBeX1!I46!s2EYJ*!9b;X;1xM 
zkIQW`BJ(E~zeh=VYaDw-kGaCt$&M-5$4*4kJCJmfYLiWe%htN>z&hPHMOwt^zmK2pDo@8 zI5Lew_fC$F#qk1i?0($r%h(ny)4vs@&)Y)@0U*Yf7TSFO>?dWlmW2W&1qaweBdADx zR6|`InL#Q!v)wq_2g}q@Q`bbRUOf}RzxB}bC5mxDE{QA zz$%)#JRy4la~`U|2sZRuP-nLYSqX$ZmT%fFR3BN-P9&nB`H&hc5p@sCnmt-m1vHa< zDW_R_ZRinygtrSQKZJ!ue#I>St#J%sB#A#>ur&ZMPGse0-RiEcu53eqg`8*&9(?RF zrm3kpXp(0=mAKSQgpHjZZ&c;fy!d{vg}-075TQ^>Tuz7`*`2 zUV9i3J<$e#t18Dpy5LU~K`0-s6E^}vhX=l*5;SW3W*=Nxe=?$$xR4$!g6`g!-V`=glpX+Ci)nUWNjx_0sD*nBV|Hn ztkfLWoyUfTbfM-OLzyfk05W#IQ`T}gIB@inG4~> zP$At+S)W%*N}>B|b~pmuw#Y5EK6c!r8wBFw$b)QF%P5sC`~XJ~u0$G)H~YAqF0rc2 zVGtoBXLv+3<7LlY|C(=5Q}pr!$+7p(0^f)^P?355{k;^y26f_nT%-@cp6uhM%R#y6 z^!}Uifsl~_effFzM?~uH&be8d9jD5k#f6FP;wz8r17_##cVoc3_;`4P3e|RF;o59V zOG{a_j=kK1O`$K;DVMP`U;#?0H`QH0gOYH6Q~>#OLj-*3i!+)m|2nX^Yy~MCDW!DW znn6W>*E+M@?waNk}=Kzpc^;V?~z?G4~nB{J8vwWo)Rg9EP#FVT}C3c8CA4u&Z0m2GUn2H=O` z_LFxeA2GOv(XE2PhO7k%;$PhQ@IlfQ=d2tF_^JAZkCfw)rDDbeqz%U39)~*-V#|?X zG7WuEoP|7qj2C=#b~L7ayhClU>Ya|2yzxr6m4rZbCW%*mYR|dl3ZKLEV705^a4Q)& z)}VRk+@PNYjT0>9%bcFOUk{It26$&tw=RD$&x_#%^B*lEEQEO!Ae%Sj(neO!n#0H| z;sYuu)IOeBjcQ5r|J*-t ze3kf@DYKX=TzgFx+7axqs|Y9CDNI%>9_=p@fdwL4m&R01+flXT*(_>(EAA$3IKAd`3?0 z72}Y{%E%C3fnTMdrUt>GVUgrOtE23wgZf=XZVB*xUTHsol+!U&S%K45+H)78TcC-{ z9)FxL9Zv!Vmn)jQzWYF5pNjPe`Jb32T4z1-uf3L50Y6fK8a$%UqId?N#O4b($A?E8 zldCqM9v9u@5pLh@;{vebog44;-w*z4lK(D-|F> Date: Tue, 12 Sep 2017 10:43:19 +0800 Subject: [PATCH 42/69] fix NoInGrad bug --- paddle/operators/pad_op.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 449463c830..99f605c651 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -47,7 +47,8 @@ class PadOpMaker : public framework::OpProtoAndCheckerMaker { "The input should be a k-D tensor(k > 0 and k < 7)"); AddOutput("Out", "The output of pad op." - "A tensor with the same shape as X."); + "A tensor with the same shape as X.") + .NotInGradient(); AddComment(R"DOC( Pad input into output, as specified by paddings and pad_value. The input should be a k-D tensor(k > 0 and k < 7). As an example: From ad64ca5da20e696d66cfcf9011d16a81e8ef8ff8 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Tue, 12 Sep 2017 10:45:11 +0800 Subject: [PATCH 43/69] Call Tensor::numel() everywhere. --- paddle/framework/tensor.h | 5 ++++- paddle/framework/tensor_impl.h | 8 ++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index fc54ed697f..19051db539 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -162,7 +162,10 @@ class Tensor { /*! points to dimensions of memory block. */ DDim dims_; - /*! the element count of tensor. */ + /** + * A cache of the number of elements in a tensor. + * Would be 0 for an uninitialized tensor. + */ int64_t numel_; /** diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 03678784b4..5e32bfcac6 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -24,7 +24,7 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tenosr holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), numel_ * sizeof(T) + offset_, + holder_->size(), numel() * sizeof(T) + offset_, "Tensor's dims_ is out of bound. 
Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); @@ -54,11 +54,11 @@ inline T* Tensor::mutable_data(DDim dims, platform::Place place) { template inline T* Tensor::mutable_data(platform::Place place) { static_assert(std::is_pod::value, "T must be POD"); - PADDLE_ENFORCE_GT(numel_, 0, + PADDLE_ENFORCE_GT(numel(), 0, "Tensor's numel must be larger than zero to call " "Tensor::mutable_data. Call Tensor::set_dim first."); /* some versions of boost::variant don't have operator!= */ - int64_t size = numel_ * sizeof(T); + int64_t size = numel() * sizeof(T); if (holder_ == nullptr || !(holder_->place() == place) || holder_->size() < size + offset_) { if (platform::is_cpu_place(place)) { @@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const { PADDLE_ENFORCE_LT(begin_idx, end_idx, "Begin index must be less than end index."); PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1."); - size_t base = numel_ / dims_[0]; + size_t base = numel() / dims_[0]; Tensor dst; dst.holder_ = holder_; DDim dst_dims = dims_; From dd926498e7e61b25250c8f59d91afe57ab24098a Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 20:09:21 -0700 Subject: [PATCH 44/69] adapt to the new test framework --- .../v2/framework/tests/test_reshape_op.py | 27 +++++++------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_reshape_op.py b/python/paddle/v2/framework/tests/test_reshape_op.py index 50653f58ee..16bb6bb2af 100644 --- a/python/paddle/v2/framework/tests/test_reshape_op.py +++ b/python/paddle/v2/framework/tests/test_reshape_op.py @@ -1,29 +1,20 @@ import unittest import numpy as np -from gradient_checker import GradientChecker, Operator -from op_test_util import OpTestMeta +from op_test import OpTest -class TestReshapeOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestReshapeOp(OpTest): def setUp(self): - self.type = "reshape" - self.inputs = {'X': np.random.random((37, 51)).astype("float32"), } - self.attrs = {'shape': [51 * 37]} + self.op_type = "reshape" + self.inputs = {'X': np.random.random((10, 20)).astype("float32")} + self.attrs = {'shape': [10 * 20]} self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])} + def test_check_output(self): + self.check_output() -class TestReshapeGradOp(GradientChecker): - def setUp(self): - self.op = Operator("reshape", X='X', Out='Out', shape=[5, 40]) - self.inputs = {"X": np.random.random((10, 20)).astype("float32")} - - def test_normal(self): - self.check_grad(self.op, self.inputs, ["X"], "Out") - - def test_dev_compare(self): - self.compare_grad(self.op, self.inputs) + def test_check_grad(self): + self.check_grad(["X"], "Out") if __name__ == '__main__': From 0289a0091f094c75190698df7e450d8e1a70bbaa Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 22:15:29 -0700 Subject: [PATCH 45/69] follow comments to cleanup code --- paddle/operators/reshape_op.cc | 35 ++++++++++++++++++++++++++-------- paddle/operators/reshape_op.h | 9 ++++----- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 37cbecbf25..da29c89150 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -27,21 +27,26 @@ class ReshapeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto 
*in = ctx.Input("X"); + // input check + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null"); auto shape = ctx.Attr>("shape"); - int64_t capacity = -1; + PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty."); for (auto dim : shape) { PADDLE_ENFORCE(dim > 0, "Each dimension of shape must be positive."); - if (capacity < 0) { - capacity = dim; - } else { - capacity *= dim; - } } + // capacity check + int64_t capacity = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()); + auto *in = ctx.Input("X"); int64_t in_size = framework::product(in->dims()); PADDLE_ENFORCE_EQ(capacity, in_size, "The size of Input(X) mismatches with Attr(shape)."); - ctx.Output("Out")->Resize(in->dims()); + // resize output + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); + ctx.Output("Out")->Resize(out_dims); } }; @@ -56,6 +61,17 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC(Reshape operator Reshape Input(X) into the shape specified by Attr(shape). + +An example: +Given a 2-D tensor X with 2 rows and 2 columns + + [[1, 2], [3, 4]] + +with target shape = [1, 4], the reshape operator will tansform +the tensor X into a 1-D tensor: + + [1, 2, 3, 4] + )DOC"); } }; @@ -70,6 +86,9 @@ class ReshapeGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) shouldn't be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("X")->dims(); auto *d_in = ctx.Output(framework::GradVarName("X")); d_in->Resize(dims); diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 0e920329d9..26708e72dc 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -30,11 +30,10 @@ class ReshapeKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()); auto shape = ctx.Attr>("shape"); - std::vector tmp; - for (auto dim : shape) { - tmp.push_back(dim); - } - auto out_dims = framework::make_ddim(tmp); + std::vector shape_int64(shape.size(), 0); + std::transform(shape.begin(), shape.end(), shape_int64.begin(), + [](int a) { return static_cast(a); }); + auto out_dims = framework::make_ddim(shape_int64); out->CopyFrom(*in, ctx.GetPlace()); out->Resize(out_dims); } From 5915138c791c7a2d6fd40c0ae6c942ca870033c8 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Mon, 11 Sep 2017 22:22:43 -0700 Subject: [PATCH 46/69] fix a typo --- paddle/operators/reshape_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index da29c89150..b7061153d2 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -67,7 +67,7 @@ Given a 2-D tensor X with 2 rows and 2 columns [[1, 2], [3, 4]] -with target shape = [1, 4], the reshape operator will tansform +with target shape = [1, 4], the reshape operator will transform the tensor X into a 1-D tensor: [1, 2, 3, 4] From 1f839a6618db31b9be26f5d2604d98ef4fd2f46e Mon Sep 17 00:00:00 2001 From: caoying03 Date: Tue, 12 Sep 2017 17:03:17 +0800 Subject: [PATCH 47/69] fix bug in prelu parsing. 
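The fix below writes the `partial_sum` setting of `prelu_layer` into the generated layer config; before this change the attribute was silently dropped during parsing. `partial_sum` lets a group of activations share one learned slope, so a 300-wide layer keeps 300 slopes with `partial_sum=1` but only 60 with `partial_sum=5`, which is what the updated protostr expects. A minimal NumPy sketch of that behaviour follows; it is illustrative only, and the assumption that a group consists of consecutive activations is not stated in the patch:

```python
import numpy as np

def prelu_forward(x, slopes, partial_sum):
    # Every `partial_sum` consecutive activations share one learned slope.
    assert x.size % partial_sum == 0
    assert slopes.size == x.size // partial_sum
    alpha = np.repeat(slopes, partial_sum)   # one slope value per activation
    return np.where(x > 0, x, alpha * x)     # scale only the negative part

x = np.random.randn(300).astype("float32")
slopes = np.full(300 // 5, 0.25, dtype="float32")  # 60 parameters for partial_sum=5
print(prelu_forward(x, slopes, partial_sum=5).shape)  # (300,)
```

The `config_assert` in the parser enforces the same divisibility requirement as the asserts above.
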
--- python/paddle/trainer/config_parser.py | 1 + .../protostr/test_prelu_layer.protostr | 45 ++++++++++++++++++- .../tests/configs/test_prelu_layer.py | 2 + 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 356e1d8b6f..4f68a89534 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2034,6 +2034,7 @@ class ParameterReluLayer(LayerBase): config_assert(input_layer.size % partial_sum == 0, "a wrong setting for partial_sum") self.set_layer_size(input_layer.size) + self.config.partial_sum = partial_sum self.create_input_parameter(0, input_layer.size / partial_sum) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr index 64d227565f..94ad56cab0 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_prelu_layer.protostr @@ -14,6 +14,29 @@ layers { input_layer_name: "input" input_parameter_name: "___prelu_layer_0__.w0" } + partial_sum: 1 +} +layers { + name: "__prelu_layer_1__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_1__.w0" + } + partial_sum: 1 +} +layers { + name: "__prelu_layer_2__" + type: "prelu" + size: 300 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "___prelu_layer_2__.w0" + } + partial_sum: 5 } parameters { name: "___prelu_layer_0__.w0" @@ -23,14 +46,32 @@ parameters { initial_strategy: 0 initial_smart: true } +parameters { + name: "___prelu_layer_1__.w0" + size: 300 + initial_mean: 0.0 + initial_std: 0.057735026919 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___prelu_layer_2__.w0" + size: 60 + initial_mean: 0.0 + initial_std: 0.129099444874 + initial_strategy: 0 + initial_smart: true +} input_layer_names: "input" -output_layer_names: "__prelu_layer_0__" +output_layer_names: "__prelu_layer_2__" sub_models { name: "root" layer_names: "input" layer_names: "__prelu_layer_0__" + layer_names: "__prelu_layer_1__" + layer_names: "__prelu_layer_2__" input_layer_names: "input" - output_layer_names: "__prelu_layer_0__" + output_layer_names: "__prelu_layer_2__" is_recurrent_layer_group: false } diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py index 2e3057f323..aae90fab32 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_prelu_layer.py @@ -2,5 +2,7 @@ from paddle.trainer_config_helpers import * data = data_layer(name='input', size=300) prelu = prelu_layer(input=data) +prelu = prelu_layer(input=data, partial_sum=1) +prelu = prelu_layer(input=data, partial_sum=5) outputs(prelu) From 5991a35e582c295629978f50b162b98d3192a3df Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 10:10:46 +0000 Subject: [PATCH 48/69] Install the automatically built, compiled libraries under third_party when executing `make install` and WITH_C_API is set. 
--- cmake/external/gflags.cmake | 13 +++++++++++-- cmake/external/glog.cmake | 13 +++++++++++-- cmake/external/openblas.cmake | 20 ++++++++++++++++++++ cmake/external/protobuf.cmake | 9 +++++++++ cmake/external/zlib.cmake | 9 +++++++++ 5 files changed, 60 insertions(+), 4 deletions(-) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 16e5bef4cd..01a2f4d5fa 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -18,9 +18,9 @@ SET(GFLAGS_SOURCES_DIR ${THIRD_PARTY_PATH}/gflags) SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags) SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE) IF(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) ELSE(WIN32) - set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) + set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE) ENDIF(WIN32) INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR}) @@ -56,3 +56,12 @@ SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES}) ADD_DEPENDENCIES(gflags extern_gflags) LIST(APPEND external_project_dependencies gflags) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags) + IF(ANDROID) + INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib) + ENDIF() +ENDIF() diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 8a594a825a..b450a30166 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -19,9 +19,9 @@ SET(GLOG_INSTALL_DIR ${THIRD_PARTY_PATH}/install/glog) SET(GLOG_INCLUDE_DIR "${GLOG_INSTALL_DIR}/include" CACHE PATH "glog include directory." FORCE) IF(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.lib" CACHE FILEPATH "glog library." FORCE) ELSE(WIN32) - SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) + SET(GLOG_LIBRARIES "${GLOG_INSTALL_DIR}/lib/libglog.a" CACHE FILEPATH "glog library." FORCE) ENDIF(WIN32) INCLUDE_DIRECTORIES(${GLOG_INCLUDE_DIR}) @@ -56,3 +56,12 @@ ADD_DEPENDENCIES(glog extern_glog gflags) LINK_LIBRARIES(glog gflags) LIST(APPEND external_project_dependencies glog) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog) + IF(ANDROID) + INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib) + ENDIF() +ENDIF() diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index f9e05af59f..4fc8d43fc1 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -73,6 +73,26 @@ IF(NOT ${CBLAS_FOUND}) UPDATE_COMMAND "" CONFIGURE_COMMAND "" ) + + IF(WITH_C_API) + INSTALL(DIRECTORY ${CBLAS_INC_DIR} DESTINATION third_party/openblas) + # Because libopenblas.a is a symbolic link of another library, thus need to + # install the whole directory. 
+ IF(ANDROID) + SET(TMP_INSTALL_DIR third_party/openblas/lib/${ANDROID_ABI}) + ELSE() + SET(TMP_INSTALL_DIR third_party/openblas/lib) + ENDIF() + INSTALL(CODE "execute_process( + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CBLAS_INSTALL_DIR}/lib + destination ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR} + )" + ) + INSTALL(CODE "MESSAGE(STATUS \"Installing: \" + \"${CBLAS_INSTALL_DIR}/lib -> ${CMAKE_INSTALL_PREFIX}/${TMP_INSTALL_DIR}\" + )" + ) + ENDIF() ENDIF(NOT ${CBLAS_FOUND}) MESSAGE(STATUS "BLAS library: ${CBLAS_LIBRARIES}") diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index e629d61585..a887be2e2a 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -223,6 +223,15 @@ IF(NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY} CACHE FILEPATH "protoc library." FORCE) + IF(WITH_C_API) + INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf) + IF(ANDROID) + INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${PROTOBUF_LIBRARY} DESTINATION third_party/protobuf/lib) + ENDIF() + ENDIF() + IF(CMAKE_CROSSCOMPILING) PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf) ELSE() diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 45ca5542b7..5aecab90ca 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -49,3 +49,12 @@ ExternalProject_Add( ) LIST(APPEND external_project_dependencies zlib) + +IF(WITH_C_API) + INSTALL(DIRECTORY ${ZLIB_INCLUDE_DIR} DESTINATION third_party/zlib) + IF(ANDROID) + INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib/${ANDROID_ABI}) + ELSE() + INSTALL(FILES ${ZLIB_LIBRARIES} DESTINATION third_party/zlib/lib) + ENDIF() +ENDIF() From 27d7812b872bc83571dab418c0b4bcd448c42589 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 18:15:55 +0800 Subject: [PATCH 49/69] Fix compiling error when using clang. 
The detail of the error is: "suggest braces around initialization of subobject [-Werror,-Wmissing-braces]" --- paddle/function/neon/NeonDepthwiseConv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/function/neon/NeonDepthwiseConv.h index aefeea78ba..33722d3cac 100644 --- a/paddle/function/neon/NeonDepthwiseConv.h +++ b/paddle/function/neon/NeonDepthwiseConv.h @@ -594,7 +594,7 @@ struct StridePadding { float32x4_t s1 = vdupq_n_f32(0.f); for (int s = 0; s < step; s++) { float32x4_t s0 = vld1q_f32(input); - float32x4x2_t v = {s0, s1}; + float32x4x2_t v = {{s0, s1}}; vst2q_f32(inputPadding, v); input += 4; inputPadding += 8; From 8a2ff350ed041e1805f860ec48d2a2c5fa4f9550 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 12 Sep 2017 18:28:17 +0800 Subject: [PATCH 50/69] simplify the python/paddle/v2/framework/tests/CMakeLists.txt --- doc/howto/dev/new_op_cn.md | 6 +-- .../paddle/v2/framework/tests/CMakeLists.txt | 43 +++---------------- .../tests/{mnist.py => test_mnist.py} | 0 3 files changed, 6 insertions(+), 43 deletions(-) rename python/paddle/v2/framework/tests/{mnist.py => test_mnist.py} (100%) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 58665e9f2b..07dce05df4 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -354,11 +354,7 @@ class TestMulGradOp(GradientChecker): ### 编译和执行单元测试 -单元测试编写完成之后,在[`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt)中添加以下内容,将单元测试加入工程: - -``` -py_test(test_mul_op SRCS test_mul_op.py) -``` +无需修改 [`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt) 文件,新增的 `test_*.py` 单元测试会被自动加入工程。 请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试: diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 6b22c00082..4d7664469e 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -1,38 +1,5 @@ -py_test(test_net SRCS test_net.py) - -py_test(test_scope SRCS test_scope.py) - -py_test(test_tensor SRCS test_tensor.py) -py_test(test_mul_op SRCS test_mul_op.py) -py_test(test_cos_sim_op SRCS test_cos_sim_op.py) - -py_test(test_mean_op SRCS test_mean_op.py) - -py_test(test_protobuf SRCS test_protobuf.py) - -py_test(test_add_two_op SRCS test_add_two_op.py) -py_test(test_sigmoid_op SRCS test_sigmoid_op.py) -py_test(test_softmax_op SRCS test_softmax_op.py) -py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py) -py_test(test_gather_op SRCS test_gather_op.py) -py_test(test_scatter_op SRCS test_scatter_op.py) -py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py) -py_test(test_top_k_op SRCS test_top_k_op.py) - -py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py) - -py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py) - -py_test(test_operator SRCS test_operator.py) -py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) -py_test(test_uniform_random_op SRCS test_uniform_random_op.py) -py_test(test_recurrent_op SRCS test_recurrent_op.py) -py_test(test_sgd_op SRCS test_sgd_op.py) -py_test(test_gradient_checker SRCS test_gradient_checker.py) -py_test(test_lookup_table SRCS test_lookup_table.py) -py_test(test_scale_and_identity_op SRCS 
test_scale_and_identity_op.py) -py_test(test_sum_op SRCS test_sum_op.py) -py_test(mnist SRCS mnist.py) -py_test(test_concat_op SRCS test_concat_op.py) -py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py) -py_test(test_reshape_op SRCS test_reshape_op.py) +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/v2/framework/tests/mnist.py b/python/paddle/v2/framework/tests/test_mnist.py similarity index 100% rename from python/paddle/v2/framework/tests/mnist.py rename to python/paddle/v2/framework/tests/test_mnist.py From e76eb8534d51f3722d8cced2ff5a69beaa0e6515 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 12 Sep 2017 20:11:34 +0800 Subject: [PATCH 51/69] fix SwitchOrderLayer forward --- paddle/gserver/layers/SwitchOrderLayer.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/gserver/layers/SwitchOrderLayer.cpp index d7eee6eaf0..e97809141a 100644 --- a/paddle/gserver/layers/SwitchOrderLayer.cpp +++ b/paddle/gserver/layers/SwitchOrderLayer.cpp @@ -83,8 +83,7 @@ void SwitchOrderLayer::forward(PassType passType) { setOutDims(); resetOutput(outDims_[0], outDims_[1] * outDims_[2] * outDims_[3]); if (heightAxis_.size() > 0) { - getOutputValue()->reshape(reshapeHeight_, reshapeWidth_); - getOutputGrad()->reshape(reshapeHeight_, reshapeWidth_); + resetOutput(reshapeHeight_, reshapeWidth_); } // switch NCHW to NHWC From 25be0ede764583f851fc1863ad9d2d65cab893c1 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Tue, 12 Sep 2017 11:29:49 -0700 Subject: [PATCH 52/69] fix cpplint error --- paddle/operators/reshape_op.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/paddle/operators/reshape_op.h b/paddle/operators/reshape_op.h index 26708e72dc..873acf3078 100644 --- a/paddle/operators/reshape_op.h +++ b/paddle/operators/reshape_op.h @@ -1,4 +1,3 @@ - /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); @@ -52,5 +51,5 @@ class ReshapeGradKernel : public framework::OpKernel { d_x->Resize(in_dims); } }; -} -} +} // namespace operators +} // namespace paddle From 5c4dfdebcb12d17b8fe3090b874a496ea38dfcf4 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 12:12:48 -0700 Subject: [PATCH 53/69] add more rules --- paddle/operators/name_convention.md | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 280ab8d317..182c74e78b 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -1,15 +1,27 @@ ## Operator Name Convention -To make the operator document itself more clear. we recommend operator names observe the listing conventions. +To make the operator document itself more clear, we recommend operator names obey the listing conventions. -### Input/Output names +### OpMaker names -* Variable name is prefer uppercase. e.g. `X`, `Y`. But when the variable is tensor, its name should lowercase. e.g. `matrix`, to discriminate with other one. +When defining an operator in Paddle, a corresponding `OpMaker` need to be defined. All the `Input`/`Output` and `attrs` will write into the `OpProto` , and will be used in client language to create operator. 
-* element wise operator, math operator or similar op, please obey common name convention. if the operator only have one output, use `Out`. +- Input/Output. + - names follow the `CamelCase` but the first character is uppercase. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. + - If an operator's Input/Output are not meaningful words, input name starts from `X`. e.g. `X`, `Y`, and output name starts from `Out`. e.g. `Out`. -* we prefer more meaningful input/output name. +* Attribute. + * Attribute name follows the normal `CamelCase`. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. +* Comments. + * Input/Output/Attr comment follow the format of `type:meaning`. e.g. `AddOutput("Out", "EigenTensor,Tensor: Output of XX")`. we prefer to more meaningful comment. Some comments like `The first input of Operator` contains no information, we forbid it. + * Operator comment format of` R"DOC(your comment here)DOC"`. if there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. ### Best Practice -e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out` -e.g. `cosine` , inputs : `X`, `axis`, outputs : `Out` + +- The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. + +- The operator has two input, one output. e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out`. + +- The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. + + ​ From d00e8a5f8350c38a9455b5fd604cac32e8b2cc62 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:47:12 -0700 Subject: [PATCH 54/69] "add Op name example and fix format error" --- paddle/operators/name_convention.md | 54 +++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 182c74e78b..8000dc8f08 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -1,27 +1,59 @@ -## Operator Name Convention +## Operator's Parameter Name Convention To make the operator document itself more clear, we recommend operator names obey the listing conventions. -### OpMaker names +### OpProtoMaker names -When defining an operator in Paddle, a corresponding `OpMaker` need to be defined. All the `Input`/`Output` and `attrs` will write into the `OpProto` , and will be used in client language to create operator. +When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc)need to be defined. All the Input/Output and Attributes will write into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61) , and will be used in client language to create operator. - Input/Output. - - names follow the `CamelCase` but the first character is uppercase. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. - - If an operator's Input/Output are not meaningful words, input name starts from `X`. e.g. `X`, `Y`, and output name starts from `Out`. e.g. `Out`. + - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. 
+ - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule make operators which have few inputs/outputs unified. -* Attribute. - * Attribute name follows the normal `CamelCase`. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. -* Comments. - * Input/Output/Attr comment follow the format of `type:meaning`. e.g. `AddOutput("Out", "EigenTensor,Tensor: Output of XX")`. we prefer to more meaningful comment. Some comments like `The first input of Operator` contains no information, we forbid it. - * Operator comment format of` R"DOC(your comment here)DOC"`. if there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. +- Attribute. + - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. + +- Comments. + - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier` + - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. + +- Order. + - Follow the order of Input/Output, then Attribute, then Comments. See the example in best practice. ### Best Practice +Here we give some examples to show how these rules will be used. + - The operator has one input, one output. e.g.`relu`, inputs: `X`, outputs: `Out`. - The operator has two input, one output. e.g. `rowwise_add`, inputs : `X`, `Y`, outputs : `Out`. - The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. - ​ + We give a full example of Accumulator Operator. Its OpProtoMaker should look like below. + +```c++ +class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker { +public: + AccumulateOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done."); + AddOutput("Out", "(Tensor) Accumulated output tensor"); + AddAttr("gamma", "(float, default 1.0) Accumulation multiplier"); + AddComment(R"DOC( +Accumulate operator accumulates the input tensor to the output tensor. If the +output tensor already has the right size, we add to it; otherwise, we first +initialize the output tensor to all zeros, and then do accumulation. Any +further calls to the operator, given that no one else fiddles with the output +in the interim, will do simple accumulations. +Accumulation is done as shown: + +Out = 1*X + gamma*Out + +where X is the input tensor, Y is the output tensor and gamma is the multiplier +argument. 
+)DOC"); + } +}; +``` From 594dece99625caa2b5a0de9998755f587348cbe5 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:54:36 -0700 Subject: [PATCH 55/69] "fix typo" --- paddle/operators/name_convention.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 8000dc8f08..59d4019a3b 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -8,13 +8,13 @@ When defining an operator in Paddle, a corresponding [OpProtoMaker](https://gith - Input/Output. - Input/Output names follow the **CamelCase**. e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Input/Output much more like Variables, we prefer to meaningful English words. - - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule make operators which have few inputs/outputs unified. + - If an operator's Input/Output are tensors in math, not match to any meaningful words, input name should starts from `X`. e.g. `X`, `Y`, and output name should starts from `Out`. e.g. `Out`. This rule intends making operators which have few inputs/outputs unified. - Attribute. - Attribute name follows the **camelCase**. e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Also, attribute name prefers to meaningful English words. - Comments. - - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier` + - Input/Output/Attr comment follow the format of **(type,default value) usage**, corresponding to which type it can be and how it will be used in the operator. e.g. Attribute in Accumulator`"gamma" `,`(float, default 1.0) Accumulation multiplier`. - Operator comment format of` R"DOC(your comment here)DOC"`. You should explain the input/output of the operator first. If there is math calculation in this operator, you should write the equation in the comment. e.g. `Out = X + Y`. - Order. From 15fccfefb5afe9cf145dc045c7e4ecb6613d8b71 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 15:59:58 -0700 Subject: [PATCH 56/69] "remove used words" --- paddle/operators/name_convention.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/name_convention.md b/paddle/operators/name_convention.md index 59d4019a3b..a090e0b545 100644 --- a/paddle/operators/name_convention.md +++ b/paddle/operators/name_convention.md @@ -30,7 +30,7 @@ Here we give some examples to show how these rules will be used. - The operator contains attribute. e.g. `cosine`, inputs : `X`, `axis`, outputs : `Out`. - We give a full example of Accumulator Operator. Its OpProtoMaker should look like below. + We give a full example of Accumulator Operator. 
```c++ class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker { From a7e3325aade2b36816026cf311f70b393dbeae8b Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 17:09:35 -0700 Subject: [PATCH 57/69] "fix typos" --- paddle/framework/backward.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 0859bf1d9b..d0494f50d7 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -2,7 +2,7 @@ ## Motivation -In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need to compound the gradient operators/expressions together with the chain rule. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. +In Neural Network, many model is solved by the the backpropagation algorithm(known as BP) at present. Technically it caculates the gradient of the loss function, then distributed back through the networks. Follows the chain rule, so we need a module chains the gradient operators/expressions together with to construct the backward pass. Every forward network needs a backward network to construct the full computation graph, the operator/expression's backward pass will be generated respect to forward pass. ## Implementation @@ -13,7 +13,7 @@ std::unique_ptr Backward(const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); ``` -The implementation behind it can be divided into two parts. Namely, ** Backward Operator Creating** and **Backward Operator Building**. +The implementation behind it can be divided into two parts, ** Backward Operator Creating** and **Backward Operator Building**. ###Backward Operator Registry @@ -60,7 +60,7 @@ A backward network is a series of backward operators. The main idea of building 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NoGradient` operator + when the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NOP`. 2. NetOp @@ -70,27 +70,27 @@ A backward network is a series of backward operators. The main idea of building RnnOp is a nested stepnet operator. Backward module need to recusively call `Backward` for every stepnet. -4. Shared Variable +4. Sharing Variables - **shared variable**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their shared input variable. + **sharing variables**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their sharing input variable.


-​ pic 1. Shared variable in operators. +​ pic 1. Sharing variables in operators.

-​ Share variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator replace the overwrite links. +​ Sharing variable between operators or same input variable used in multiple operators leads to a duplicate gradient variable. As demo show above, we need to rename gradient name recursively and add a generic add operator to replace the overwrite links.
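A tiny, self-contained Python sketch may make the rename-then-add idea concrete. It is not Paddle code; the `@GRAD` suffix matches the gradient naming used elsewhere in this series, while the extra positional suffix is only an assumed illustration of the renaming described above:

```python
grads = {}

def write_grad(name, value):
    # Each backward operator writes to a uniquely renamed gradient variable
    # instead of overwriting the shared one.
    idx = sum(1 for k in grads if k.startswith(name + "@GRAD@"))
    grads["%s@GRAD@%d" % (name, idx)] = value

write_grad("W", 0.5)   # contribution from the first operator's backward pass
write_grad("W", 1.5)   # contribution from the second operator's backward pass

# A generic add operator then merges the renamed copies into the real gradient.
grads["W@GRAD"] = sum(v for k, v in grads.items() if k.startswith("W@GRAD@"))
print(grads["W@GRAD"])  # 2.0
```
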


-​ pic 2. Replace shared variable's gradient with `Add` operator. +​ pic 2. Replace sharing variable's gradient with `Add` operator.

-​ Because our framework find variable accord to its name, we need rename the output links. We add a suffix of number represent its position in clockwise. +​ Because our framework finds variables accord to their names, we need to rename the output links. We add a suffix of number to represent its position in clockwise. 5. Part of Gradient is Zero. From 6d03ca33475b75b22bd306d57ac1d0aaf681dd46 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 13 Sep 2017 10:10:20 +0800 Subject: [PATCH 58/69] refine new_op_cn.md --- doc/howto/dev/new_op_cn.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/howto/dev/new_op_cn.md b/doc/howto/dev/new_op_cn.md index 07dce05df4..e3892849ab 100644 --- a/doc/howto/dev/new_op_cn.md +++ b/doc/howto/dev/new_op_cn.md @@ -262,7 +262,7 @@ MulOp(const std::string &type, const framework::VariableNameMap &inputs, - 生成库 - 无需修改 [`paddle/pybind/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/pybind/CMakeLists.txt)文件,`paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 + `paddle/operators` 目录下新增的 `*_op.cc` 文件会被自动添加链接到生成的lib库中。 ## 实现单元测试 @@ -354,7 +354,7 @@ class TestMulGradOp(GradientChecker): ### 编译和执行单元测试 -无需修改 [`python/paddle/v2/framework/tests/CMakeLists.txt`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/v2/framework/tests/CMakeLists.txt) 文件,新增的 `test_*.py` 单元测试会被自动加入工程。 +`python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译。 请注意,**不同于Op的编译测试,运行单元测试测时需要编译整个工程**,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON`。编译成功后,执行下面的命令来运行单元测试: From bc9e20d9ed399d6b21c31afa4c294b7bb7371e43 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:01:50 -0700 Subject: [PATCH 59/69] "update img alt" --- paddle/framework/backward.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index d0494f50d7..61c80635b8 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -13,9 +13,9 @@ std::unique_ptr Backward(const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); ``` -The implementation behind it can be divided into two parts, ** Backward Operator Creating** and **Backward Operator Building**. +The implementation behind it can be divided into two parts, **Backward Operator Creating** and **Backward Operator Building**. -###Backward Operator Registry +### Backward Operator Registry A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. @@ -36,7 +36,7 @@ REGISTER_OP(mul, MulOp, MulOpMaker, mul_grad, MulOpGrad); `mul_grad` is the type of backward operator, and `MulOpGrad` is its class name. -###Backward Opeartor Creating +### Backward Opeartor Creating Given a certain forward operator, we can get its corresponding backward operator by calling: @@ -54,13 +54,13 @@ The function `BuildGradOp` will sequentially execute following processes: 4. Building backward operator with `inputs`, `outputs` and forward operator's attributes. -###Backward Network Building +### Backward Network Building A backward network is a series of backward operators. The main idea of building a backward network is creating backward operators in the inverted sequence and append them together one by one. There is some corner case need to process specially. 1. Op - when the input forward network is an Op, return its gradient Operator Immediately. 
If all of its outputs are in no gradient set, then return a special `NOP`. + When the input forward network is an Op, return its gradient Operator Immediately. If all of its outputs are in no gradient set, then return a special `NOP`. 2. NetOp @@ -72,12 +72,12 @@ A backward network is a series of backward operators. The main idea of building 4. Sharing Variables - **sharing variables**. As illustrated in the pictures, two operator's `Output` `Gradient` will overwrite their sharing input variable. + **sharing variables**. As illustrated in the pictures, two operator's share the same variable name of W@GRAD, which will overwrite their sharing input variable.

-
+Sharing variables in operators.
-​ pic 1. Sharing variables in operators. +​ pic 1.

From 885fa893324b3c51f676c706e09d5472822fffe2 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:05:13 -0700 Subject: [PATCH 60/69] "remove the alt" --- paddle/framework/backward.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 61c80635b8..19e1850e46 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -75,9 +75,9 @@ A backward network is a series of backward operators. The main idea of building **sharing variables**. As illustrated in the pictures, two operator's share the same variable name of W@GRAD, which will overwrite their sharing input variable.

-Sharing variables in operators.
+
-​ pic 1. +​ pic 1. Sharing variables in operators.

From a90274eb5ce32025ed9492d969502cc3157cee52 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Tue, 12 Sep 2017 20:07:38 -0700 Subject: [PATCH 61/69] "update words" --- paddle/framework/backward.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md index 19e1850e46..0a6d762bc8 100644 --- a/paddle/framework/backward.md +++ b/paddle/framework/backward.md @@ -17,7 +17,7 @@ The implementation behind it can be divided into two parts, **Backward Operator ### Backward Operator Registry -A backward network is built up with several backward operators. Backward operators take forward operators' inputs outputs, and output gradients and then calculate its input gradients. +A backward network is built up with several backward operators. Backward operators take forward operators' inputs, outputs, and output gradients and then calculate its input gradients. | | forward operator | backward operator | ---------------------- | ---------------- |------------------------- | From 236a84c5050d419285cb7fbcc9c8f5bf923058ab Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 13 Sep 2017 11:09:01 +0800 Subject: [PATCH 62/69] Fix nullptr check --- paddle/operators/pad_op.cc | 5 +++-- paddle/operators/pad_op.h | 13 +++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc index 99f605c651..7e78b6ec13 100644 --- a/paddle/operators/pad_op.cc +++ b/paddle/operators/pad_op.cc @@ -96,8 +96,9 @@ class PadOpGrad : public framework::OperatorWithKernel { "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); auto *x_grad = ctx.Output(framework::GradVarName("X")); - - x_grad->Resize(x_dims); + if (x_grad != nullptr) { + x_grad->Resize(x_dims); + } } }; diff --git a/paddle/operators/pad_op.h b/paddle/operators/pad_op.h index ca8832f26a..2cc3b945ae 100644 --- a/paddle/operators/pad_op.h +++ b/paddle/operators/pad_op.h @@ -87,12 +87,13 @@ void PadGradFunction(const framework::ExecutionContext& context) { } auto* d_out = context.Input(framework::GradVarName("Out")); auto* d_x = context.Output(framework::GradVarName("X")); - d_x->mutable_data(context.GetPlace()); - - auto d_x_tensor = EigenTensor::From(*d_x); - auto d_out_tensor = EigenTensor::From(*d_out); - auto place = context.GetEigenDevice(); - d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); + if (d_x != nullptr) { + d_x->mutable_data(context.GetPlace()); + auto d_x_tensor = EigenTensor::From(*d_x); + auto d_out_tensor = EigenTensor::From(*d_out); + auto place = context.GetEigenDevice(); + d_x_tensor.device(place) = d_out_tensor.pad(paddings, 0); + } } template From 92e7b09547a102edb8724eb95b1756dd3d0c5b16 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Tue, 12 Sep 2017 19:52:43 +0800 Subject: [PATCH 63/69] Add ARGS ANDROID_API in Dockerfile.android, to support using toolchain of different api level. 
--- Dockerfile.android | 13 +++++---- paddle/scripts/docker/build_android.sh | 37 ++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/Dockerfile.android b/Dockerfile.android index 452aa15745..9d13a414f6 100644 --- a/Dockerfile.android +++ b/Dockerfile.android @@ -6,13 +6,14 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub # ENV variables ARG ANDROID_ABI +ARG ANDROID_API ENV ANDROID_ABI=${ANDROID_ABI:-"armeabi-v7a"} +ENV ANDROID_API=${ANDROID_API:-21} ENV HOME=/root \ ANDROID_NDK_HOME=/opt/android-ndk-linux \ - ANDROID_ARM_STANDALONE_TOOLCHAIN=/opt/arm-toolchain \ - ANDROID_ARM64_STANDALONE_TOOLCHAIN=/opt/arm64-toolchain + ANDROID_TOOLCHAINS_DIR=/opt/toolchains RUN apt-get update && \ apt-get install -y \ @@ -42,14 +43,12 @@ RUN pip install --upgrade pip && \ pip install pre-commit # Android NDK -RUN mkdir /opt/android-ndk-tmp && \ +RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \ + mkdir -p /opt/android-ndk-tmp && \ cd /opt/android-ndk-tmp && \ wget -q https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip && \ unzip -q android-ndk-r14b-linux-x86_64.zip && \ mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ - ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm --platform=android-23 --install-dir=${ANDROID_ARM_STANDALONE_TOOLCHAIN} && \ - ${ANDROID_NDK_HOME}/build/tools/make-standalone-toolchain.sh --arch=arm64 --platform=android-23 --install-dir=${ANDROID_ARM64_STANDALONE_TOOLCHAIN} && \ - rm -rf /opt/android-ndk-tmp && \ - rm -rf ${ANDROID_NDK_HOME} + rm -rf /opt/android-ndk-tmp CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"] diff --git a/paddle/scripts/docker/build_android.sh b/paddle/scripts/docker/build_android.sh index aabd2da5e4..11612ad4be 100644 --- a/paddle/scripts/docker/build_android.sh +++ b/paddle/scripts/docker/build_android.sh @@ -2,8 +2,30 @@ set -xe +if [ $ANDROID_ABI == "arm64-v8a" ]; then + ANDROID_ARCH=arm64 +else # armeabi, armeabi-v7a + ANDROID_ARCH=arm +fi + +ANDROID_STANDALONE_TOOLCHAIN=$ANDROID_TOOLCHAINS_DIR/$ANDROID_ARCH-android-$ANDROID_API + +cat </dev/null || true mkdir -p $BUILD_ROOT @@ -11,7 +33,7 @@ cd $BUILD_ROOT if [ $ANDROID_ABI == "armeabi-v7a" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_NEON=ON \ -DANDROID_ARM_MODE=ON \ @@ -26,7 +48,7 @@ if [ $ANDROID_ABI == "armeabi-v7a" ]; then .. elif [ $ANDROID_ABI == "arm64-v8a" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM64_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_MODE=ON \ -DHOST_C_COMPILER=/usr/bin/gcc \ @@ -40,12 +62,12 @@ elif [ $ANDROID_ABI == "arm64-v8a" ]; then .. 
elif [ $ANDROID_ABI == "armeabi" ]; then cmake -DCMAKE_SYSTEM_NAME=Android \ - -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_ARM_STANDALONE_TOOLCHAIN \ + -DANDROID_STANDALONE_TOOLCHAIN=$ANDROID_STANDALONE_TOOLCHAIN \ -DANDROID_ABI=$ANDROID_ABI \ -DANDROID_ARM_MODE=ON \ -DHOST_C_COMPILER=/usr/bin/gcc \ -DHOST_CXX_COMPILER=/usr/bin/g++ \ - -DCMAKE_INSTALL_PREFIX=/paddle/install \ + -DCMAKE_INSTALL_PREFIX=$DEST_ROOT \ -DCMAKE_BUILD_TYPE=Release \ -DWITH_C_API=ON \ -DWITH_SWIG_PY=OFF \ @@ -55,5 +77,10 @@ else echo "Invalid ANDROID_ABI: $ANDROID_ABI" fi +cat < Date: Wed, 13 Sep 2017 12:16:52 +0800 Subject: [PATCH 64/69] Update pad op unitest --- .../paddle/v2/framework/tests/test_pad_op.py | 60 +++---------------- 1 file changed, 9 insertions(+), 51 deletions(-) diff --git a/python/paddle/v2/framework/tests/test_pad_op.py b/python/paddle/v2/framework/tests/test_pad_op.py index 56b9c88f7d..456b765e33 100644 --- a/python/paddle/v2/framework/tests/test_pad_op.py +++ b/python/paddle/v2/framework/tests/test_pad_op.py @@ -1,16 +1,12 @@ import unittest import numpy as np -from paddle.v2.framework.op import Operator -from gradient_checker import GradientChecker, create_op -from op_test_util import OpTestMeta +from op_test import OpTest -class TestPadOp(unittest.TestCase): - __metaclass__ = OpTestMeta - +class TestPadOp(OpTest): def setUp(self): self.initTestCase() - self.type = "pad" + self.op_type = "pad" self.inputs = {'X': np.random.random(self.shape).astype("float32"), } self.attrs = {} self.attrs['paddings'] = np.array(self.paddings).flatten() @@ -22,6 +18,12 @@ class TestPadOp(unittest.TestCase): constant_values=self.pad_value) } + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X'], 'Out') + def initTestCase(self): self.shape = (16, 16) self.paddings = [(0, 1), (2, 3)] @@ -49,49 +51,5 @@ class TestCase3(TestPadOp): self.pad_value = 0.9 -class TestPadGradOp(GradientChecker): - def setUp(self): - self.initTestCase() - self.op = Operator( - type="pad", - X="X", - Out="Out", - paddings=np.array(self.paddings).flatten(), - pad_value=self.pad_value) - self.inputs = {'X': np.random.random(self.shape).astype("float32"), } - - def initTestCase(self): - self.shape = (16, 16) - self.paddings = [(0, 1), (2, 3)] - self.pad_value = 0 - - def test_normal(self): - self.check_grad(self.op, self.inputs, set(["X"]), "Out") - - def test_cpu_gpu_compare(self): - self.compare_grad(self.op, self.inputs) - - -class TestiGradCase1(TestPadOp): - def initTestCase(self): - self.shape = (2, 3, 4, 4) - self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] - self.pad_value = 0.5 - - -class TestGradCase2(TestPadOp): - def initTestCase(self): - self.shape = (2, 2, 2) - self.paddings = [(0, 0), (0, 0), (1, 2)] - self.pad_value = 1 - - -class TestGradCase3(TestPadOp): - def initTestCase(self): - self.shape = (8) - self.paddings = [(0, 1)] - self.pad_value = 0.9 - - if __name__ == '__main__': unittest.main() From b51ba53a55ef5dda2b37fb8feb4d68de0d659118 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 13 Sep 2017 12:54:49 +0800 Subject: [PATCH 65/69] Write the building and the lastest commit into a BUILD.txt in install phase. 
--- CMakeLists.txt | 2 ++ paddle/capi/CMakeLists.txt | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 08237cd850..e3194cd29c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -67,6 +67,8 @@ endif() if(ANDROID) if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16") + elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") + message(WARNING "Using the unofficial git repository instead") endif() set(WITH_GPU OFF CACHE STRING diff --git a/paddle/capi/CMakeLists.txt b/paddle/capi/CMakeLists.txt index dde99ab340..3af111eb57 100644 --- a/paddle/capi/CMakeLists.txt +++ b/paddle/capi/CMakeLists.txt @@ -64,9 +64,29 @@ link_paddle_exe(paddle_capi_shared) install(FILES ${CAPI_HEADERS} DESTINATION include/paddle) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/config.h DESTINATION include/paddle) if(ANDROID) + execute_process( + COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -1 + OUTPUT_VARIABLE GIT_COMMITS_LIST + RESULT_VARIABLE GIT_COMMITS_LIST_RESULT + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + if(${GIT_COMMITS_LIST_RESULT}) + set(GIT_COMMITS_LIST "No commits.") + endif() install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib/${ANDROID_ABI}) install(TARGETS paddle_capi_shared DESTINATION lib/${ANDROID_ABI}) + install(CODE "FILE(WRITE ${CMAKE_INSTALL_PREFIX}/lib/${ANDROID_ABI}/BUILD.txt + \"Compiler:\n\" + \"\\t${CMAKE_C_COMPILER}\\n\" + \"\\t${CMAKE_CXX_COMPILER}\\n\" + \"Compiler Flags:\\n\" + \"\\t${CMAKE_F_FLAGS}\\n\" + \"\\t${CMAKE_CXX_FLAGS}\\n\" + \"Android API: ${CMAKE_SYSTEM_VERSION}\\n\" + \"Lastest commit:\\n\" + \"\\t${GIT_COMMITS_LIST}\\n\" + )" + ) else(ANDROID) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${capi_whole_library} DESTINATION lib) install(TARGETS paddle_capi_shared DESTINATION lib) From 8778957cfc26a76c1495c406ffdfb66755503565 Mon Sep 17 00:00:00 2001 From: gongweibao Date: Wed, 13 Sep 2017 14:18:30 +0800 Subject: [PATCH 66/69] Add element-wise multiplication operator. (#3787) Add element-wise multiplication operator --- paddle/operators/elementwise_mul_op.cc | 109 +++++++++++ paddle/operators/elementwise_mul_op.cu | 25 +++ paddle/operators/elementwise_mul_op.h | 185 ++++++++++++++++++ paddle/pybind/pybind.cc | 1 + .../tests/test_elementwise_mul_op.py | 157 +++++++++++++++ 5 files changed, 477 insertions(+) create mode 100644 paddle/operators/elementwise_mul_op.cc create mode 100644 paddle/operators/elementwise_mul_op.cu create mode 100644 paddle/operators/elementwise_mul_op.h create mode 100644 python/paddle/v2/framework/tests/test_elementwise_mul_op.py diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc new file mode 100644 index 0000000000..1742925545 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.cc @@ -0,0 +1,109 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "paddle/operators/elementwise_mul_op.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class ElementWiseMulOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + auto x_dim = ctx.Input("X")->dims(); + auto y_dim = ctx.Input("Y")->dims(); + PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), + "Rank of first input must >= rank of second input.") + ctx.Output("Out")->Resize(x_dim); + } +}; + +class ElementWiseMulOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ElementWiseMulOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "The first input of elementwise mul op"); + AddInput("Y", "The second input of elementwise mul op"); + AddAttr("axis", + R"DOC( +When shape(Y) does not equal shape(X),Y will be broadcasted +to match the shape of X and axis should be dimension index Y in X + )DOC") + .SetDefault(-1) + .EqualGreaterThan(-1); + + AddOutput("Out", "The output of elementwise mul op"); + AddComment(R"DOC( +Limited elementwise multiple operator.The equation is: Out = X ⊙ Y. +1. The shape of Y should be same with X or +2. Y's shape is a subset of X. + Y will be broadcasted to match the shape of X and axis should be dimension index Y in X. + example: + shape(X) = (2, 3, 4, 5), shape(Y) = (,) + shape(X) = (2, 3, 4, 5), shape(Y) = (5,) + shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) + shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 +)DOC"); + } +}; + +class ElementWiseMulOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + + auto x_dims = ctx.Input("X")->dims(); + auto y_dims = ctx.Input("Y")->dims(); + auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); + + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input.") + + if (x_grad) { + x_grad->Resize(x_dims); + } + + if (y_grad) { + y_grad->Resize(y_dims); + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP(elementwise_mul, ops::ElementWiseMulOp, ops::ElementWiseMulOpMaker, + elementwise_mul_grad, ops::ElementWiseMulOpGrad); +REGISTER_OP_CPU_KERNEL( + elementwise_mul, + ops::ElementWiseMulKernel); +REGISTER_OP_CPU_KERNEL( + elementwise_mul_grad, + ops::ElementWiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/operators/elementwise_mul_op.cu new file mode 100644 index 0000000000..56f2087c22 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.cu @@ -0,0 +1,25 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#define EIGEN_USE_GPU +#include "paddle/operators/elementwise_mul_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_GPU_KERNEL( + elementwise_mul, + ops::ElementWiseMulKernel); +REGISTER_OP_GPU_KERNEL( + elementwise_mul_grad, + ops::ElementWiseMulGradKernel); diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h new file mode 100644 index 0000000000..e9ed679179 --- /dev/null +++ b/paddle/operators/elementwise_mul_op.h @@ -0,0 +1,185 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once +#include +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { +/* + * Out = X ⊙ Y + * 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 + * pre=2, n=3*4, post=5 + * 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4,5) + * pre=2*3, n=4*5, post=1 + */ + +inline void get_mid_dims(const framework::DDim& x_dims, + const framework::DDim& y_dims, const int axis, + int& pre, int& n, int& post) { + pre = 1; + n = 1; + post = 1; + for (int i = 0; i < axis; ++i) { + pre *= x_dims[i]; + } + + for (int i = 0; i < y_dims.size(); ++i) { + PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i], + "Broadcast dimension mismatch."); + n *= y_dims[i]; + } + + for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) { + post *= x_dims[i]; + } +} + +template +class ElementWiseMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* z = ctx.Output("Out"); + z->mutable_data(ctx.GetPlace()); + + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto z_e = framework::EigenVector::Flatten(*z); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input.") + + if (x_dims == y_dims || product(y_dims) == 1) { + z_e.device(ctx.GetEigenDevice()) = x_e * y_e; + return; + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? 
x_dims.size() - y_dims.size() : axis); + PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(), + "Axis should be in range [0, x_dims)"); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + if (post == 1) { + auto y_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + z_e.device(ctx.GetEigenDevice()) = x_e * y_bcast; + return; + } else { + auto y_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + z_e.device(ctx.GetEigenDevice()) = x_e * y_bcast; + return; + } + } +}; + +template +class ElementWiseMulGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using Tensor = framework::Tensor; + + auto* x = ctx.Input("X"); + auto* y = ctx.Input("Y"); + auto* dout = ctx.Input(framework::GradVarName("Out")); + + auto x_e = framework::EigenVector::Flatten(*x); + auto y_e = framework::EigenVector::Flatten(*y); + auto dout_e = framework::EigenVector::Flatten(*dout); + + auto x_dims = x->dims(); + auto y_dims = y->dims(); + + auto* dx = ctx.Output(framework::GradVarName("X")); + auto* dy = ctx.Output(framework::GradVarName("Y")); + if (dx) { + dx->mutable_data(ctx.GetPlace()); + } + if (dy) { + dy->mutable_data(ctx.GetPlace()); + } + + if (x_dims == y_dims || product(y_dims) == 1) { + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = x_e * dout_e; + } + return; + } + + int axis = ctx.Attr("axis"); + axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis); + + int pre, n, post; + get_mid_dims(x_dims, y_dims, axis, pre, n, post); + + // TODO(gongweibao): wrap reshape to a function. 
+ if (post == 1) { + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n)) + .broadcast(Eigen::DSizes(pre, 1)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e_bcast; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = + (x_e * dout_e) + .reshape(Eigen::DSizes(pre, n)) + .sum(Eigen::array{{0}}); + } + return; + } else { + auto y_e_bcast = y_e.reshape(Eigen::DSizes(1, n, 1)) + .broadcast(Eigen::DSizes(pre, 1, post)) + .reshape(Eigen::DSizes(x_e.size())); + if (dx) { + auto dx_e = framework::EigenVector::Flatten(*dx); + dx_e.device(ctx.GetEigenDevice()) = dout_e * y_e_bcast; + } + + if (dy) { + auto dy_e = framework::EigenVector::Flatten(*dy); + dy_e.device(ctx.GetEigenDevice()) = + (x_e * dout_e) + .reshape(Eigen::DSizes(pre, n, post)) + .sum(Eigen::array{{0, 2}}); + } + return; + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 16a2368aae..ef62d6e997 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -35,6 +35,7 @@ USE_OP(add); USE_OP(onehot_cross_entropy); USE_OP(sgd); USE_OP(mul); +USE_OP(elementwise_mul); USE_OP(mean); USE_OP(sigmoid); USE_OP(softmax); diff --git a/python/paddle/v2/framework/tests/test_elementwise_mul_op.py b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py new file mode 100644 index 0000000000..e268cfddb2 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_elementwise_mul_op.py @@ -0,0 +1,157 @@ +import unittest +import numpy as np +from op_test import OpTest + + +class TestElementwiseMulOp_Matrix(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + """ Warning + CPU gradient check error! 
+ 'X': np.random.random((32,84)).astype("float32"), + 'Y': np.random.random((32,84)).astype("float32") + """ + self.inputs = { + 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), + 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + } + self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_Vector(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.random((32, )).astype("float32"), + 'Y': np.random.random((32, )).astype("float32") + } + self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_0(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(2).astype(np.float32) + } + + self.attrs = {'axis': 0} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(2, 1, 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_1(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(3).astype(np.float32) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 1) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_2(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4).astype(np.float32), + 'Y': np.random.rand(4).astype(np.float32) + } + + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 4) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], 'Out', 
max_relative_error=0.1, no_grad_set=set('Y')) + + +class TestElementwiseMulOp_broadcast_3(OpTest): + def setUp(self): + self.op_type = "elementwise_mul" + self.inputs = { + 'X': np.random.rand(2, 3, 4, 5).astype(np.float32), + 'Y': np.random.rand(3, 4).astype(np.float32) + } + + self.attrs = {'axis': 1} + self.outputs = { + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 4, 1) + } + + +if __name__ == '__main__': + unittest.main() From b75123b56f31c3c90cdf07d378a806efdf5d838a Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 13 Sep 2017 11:30:03 +0000 Subject: [PATCH 67/69] Add TODO comment about the support of glog for Android api 16 ~ 19. --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index e3194cd29c..5739c2a260 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -68,6 +68,7 @@ if(ANDROID) if(${CMAKE_SYSTEM_VERSION} VERSION_LESS "16") message(FATAL_ERROR "Unsupport standalone toolchains with Android API level lower than 16") elseif(${CMAKE_SYSTEM_VERSION} VERSION_LESS "21") + # TODO: support glog for Android api 16 ~ 19 in the future message(WARNING "Using the unofficial git repository instead") endif() From 22de57f1273d08d0dfd2e46a7a7c91e5022e0db3 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 13 Sep 2017 19:56:49 +0800 Subject: [PATCH 68/69] enable cudnn_convt --- python/paddle/trainer_config_helpers/layers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 4b1d80d3db..8c7d1738ad 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -169,6 +169,7 @@ class LayerType(object): EXCONV_LAYER = 'exconv' EXCONVTRANS_LAYER = 'exconvt' CUDNNCONV_LAYER = 'cudnn_conv' + CUDNNCONVTRANS_LAYER = 'cudnn_convt' POOL_LAYER = 'pool' POOL3D_LAYER = 'pool3d' BATCH_NORM_LAYER = 'batch_norm' From 47975870aa043d1d4e6c71335c6e4d09df94e13d Mon Sep 17 00:00:00 2001 From: Yancey Date: Wed, 13 Sep 2017 20:18:00 +0800 Subject: [PATCH 69/69] Fix check grad with multioutput (#4067) Fix check grad with multi outputs --- python/paddle/v2/framework/tests/op_test.py | 16 +++++++++++----- .../v2/framework/tests/test_gradient_checker.py | 3 ++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/framework/tests/op_test.py b/python/paddle/v2/framework/tests/op_test.py index 4fec4c9109..9936fd76ba 100644 --- a/python/paddle/v2/framework/tests/op_test.py +++ b/python/paddle/v2/framework/tests/op_test.py @@ -85,7 +85,7 @@ def get_numeric_gradient(scope, op, inputs, input_to_check, - output_name, + output_names, delta=0.005, in_place=False): @@ -100,8 +100,11 @@ def get_numeric_gradient(scope, ctx = core.DeviceContext.create(core.CPUPlace()) def get_output(): - op.run(scope, ctx) - return np.array(scope.find_var(output_name).get_tensor()).sum() + sum = 0.0 + for output_name in output_names: + op.run(scope, ctx) + sum += np.array(scope.find_var(output_name).get_tensor()).sum() + return sum tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_size = product(tensor_to_check.get_dims()) @@ -225,7 +228,7 @@ class OpTest(unittest.TestCase): def check_grad(self, inputs_to_check, - output_name, + output_names, no_grad_set=None, in_place=False, max_relative_error=0.005): @@ -237,13 +240,16 @@ class OpTest(unittest.TestCase): if no_grad_set is None: no_grad_set = set() + if not type(output_names) is list: + output_names = [output_names] + numeric_grads = [ 
get_numeric_gradient( self.scope, self.op, self.inputs, input_to_check, - output_name, + output_names, in_place=in_place) for input_to_check in inputs_to_check ] grad_names = [ diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py index abeb01cb34..85117bf960 100644 --- a/python/paddle/v2/framework/tests/test_gradient_checker.py +++ b/python/paddle/v2/framework/tests/test_gradient_checker.py @@ -12,7 +12,8 @@ class GetNumericGradientTest(unittest.TestCase): z = x + y scope = core.Scope() add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict()) - arr = get_numeric_gradient(scope, add_op, {'X': x, 'Y': y}, 'X', 'Out') + arr = get_numeric_gradient(scope, add_op, {'X': x, + 'Y': y}, 'X', ['Out']) self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) def test_softmax_op(self):
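As an editorial footnote to the last patch: the multi-output trick in `get_numeric_gradient`, summing every output into one scalar before differencing, can be sketched outside the framework with plain NumPy. The `numeric_gradient` helper and the toy two-output operator below are illustrative assumptions, not part of the Paddle test utilities; the real `check_grad` additionally handles in-place ops, no-grad sets, and relative-error reporting.

```python
import numpy as np


def numeric_gradient(f, x, delta=0.005):
    """Finite-difference gradient of the sum of all outputs of f w.r.t. x."""
    x = np.array(x, dtype=np.float64)  # private, contiguous copy

    def total(v):
        outs = f(v)
        if not isinstance(outs, (list, tuple)):
            outs = [outs]
        return sum(float(np.sum(o)) for o in outs)  # one scalar over all outputs

    grad = np.zeros_like(x)
    flat_x, flat_g = x.reshape(-1), grad.reshape(-1)
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + delta
        pos = total(x)
        flat_x[i] = orig - delta
        neg = total(x)
        flat_x[i] = orig
        flat_g[i] = (pos - neg) / (2.0 * delta)  # central difference
    return grad


# toy operator with two outputs: Out0 = X * X and Out1 = 3 * X;
# d(sum(Out0) + sum(Out1)) / dX = 2 * X + 3
x = np.random.rand(2, 3)
print(np.allclose(numeric_gradient(lambda v: [v * v, 3.0 * v], x), 2 * x + 3, atol=1e-3))
```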