From b1b436458078253df97d7e279ad51d7529201c79 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 26 Jul 2017 15:06:35 +0800 Subject: [PATCH 01/58] Rename PlainNet --> NetOp --- paddle/framework/CMakeLists.txt | 4 +--- paddle/framework/net.cc | 16 ++++---------- paddle/framework/net.h | 24 +++++---------------- paddle/framework/net_op_test.cc | 37 +++++++++++++------------------- paddle/framework/net_proto.proto | 15 ------------- paddle/framework/operator.h | 14 ++++++------ paddle/operators/fc_op.cc | 2 +- paddle/operators/type_alias.h | 2 +- paddle/pybind/pybind.cc | 18 ++++++++-------- 9 files changed, 44 insertions(+), 88 deletions(-) delete mode 100644 paddle/framework/net_proto.proto diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 433edbfda7..a29a81c994 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -29,7 +29,5 @@ py_proto_compile(framework_py_proto SRCS attr_type.proto op_proto.proto op_desc. add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) add_dependencies(framework_py_proto framework_py_proto_init) -proto_library(net_proto SRCS net_proto.proto DEPS op_proto) -# cc_library(net SRCS net.cc DEPS operator net_proto op_registry fc_op) -cc_library(net SRCS net.cc DEPS operator net_proto op_registry) +cc_library(net SRCS net.cc DEPS op_registry) cc_test(net_op_test SRCS net_op_test.cc DEPS net add_op mul_op sigmoid_op softmax_op fc_op) diff --git a/paddle/framework/net.cc b/paddle/framework/net.cc index bc23b63b35..2cd378c6b2 100644 --- a/paddle/framework/net.cc +++ b/paddle/framework/net.cc @@ -20,17 +20,7 @@ namespace paddle { namespace framework { -std::shared_ptr AddBackwardOp(std::shared_ptr ForwardOps) { - auto grad_ops = std::make_shared(); - for (auto& op : ForwardOps->ops_) { - auto op_grad = OpRegistry::CreateGradOp(op); - grad_ops->AddOp(op_grad); - } - grad_ops->CompleteAddOp(); - return grad_ops; -} - -void PlainNet::CompleteAddOp(bool calc) { +void NetOp::CompleteAddOp(bool calc) { add_op_done_ = true; if (!calc) return; std::unordered_set input_set; @@ -70,7 +60,7 @@ void PlainNet::CompleteAddOp(bool calc) { attrs_["temporary_index"] = tmp_index; } -std::string PlainNet::DebugString() const { +std::string NetOp::DebugString() const { std::ostringstream os; os << OperatorBase::DebugString() << std::endl; for (auto& op : ops_) { @@ -82,5 +72,7 @@ std::string PlainNet::DebugString() const { return os.str(); } +bool NetOp::IsNetOp() const { return true; } + } // namespace framework } // namespace paddle diff --git a/paddle/framework/net.h b/paddle/framework/net.h index 3264f1f565..089c135595 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -37,21 +37,7 @@ namespace framework { * This is the base class of network, all the networks should implement the APIs * it defines. */ -class Net : public OperatorBase { - public: - virtual void AddOp(const std::shared_ptr& op) = 0; - virtual void CompleteAddOp(bool calc) = 0; -}; - -using NetPtr = std::shared_ptr; - -/** - * @brief a basic implementation of Net. - * - * PlainNet is a very simple Net, it create a list of operators, and run them - * sequentially following the order they added. 
- */ -class PlainNet : public Net { +class NetOp : public OperatorBase { public: /** * Infer all the operators' input and output variables' shapes, will be called @@ -80,15 +66,17 @@ class PlainNet : public Net { /** * @brief Add an operator by ptr */ - void AddOp(const std::shared_ptr& op) override { + void AddOp(const std::shared_ptr& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); ops_.push_back(op); } - void CompleteAddOp(bool calculate = true) override; + void CompleteAddOp(bool calculate = true); std::string DebugString() const override; + bool IsNetOp() const override; + std::vector> ops_; private: @@ -100,7 +88,5 @@ class PlainNet : public Net { } }; -std::shared_ptr AddBackwardOp(std::shared_ptr ForwardOps); - } // namespace framework } // namespace paddle diff --git a/paddle/framework/net_op_test.cc b/paddle/framework/net_op_test.cc index d924058624..8048311fe5 100644 --- a/paddle/framework/net_op_test.cc +++ b/paddle/framework/net_op_test.cc @@ -40,7 +40,7 @@ void AssertSameVectorWithoutOrder(const std::vector& expected, } TEST(OpKernel, all) { - auto net = std::make_shared(); + auto net = std::make_shared(); ASSERT_NE(net, nullptr); auto op1 = std::make_shared(); @@ -71,28 +71,21 @@ TEST(OpKernel, all) { ASSERT_EQ(2, run_cnt); ASSERT_THROW(net->AddOp(op2), paddle::platform::EnforceNotMet); } -TEST(AddBackwardOp, TestGradOp) { - auto net = std::make_shared(); - ASSERT_NE(net, nullptr); - net->AddOp(framework::OpRegistry::CreateOp("mul", {"X", "Y"}, {"Out"}, {})); - net->AddOp( - framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {"Out"}, {})); - net->AddOp(framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {""}, {})); - auto grad_ops = AddBackwardOp(net); - for (auto& op : grad_ops->ops_) { - op->DebugString(); - } -} -// TODO(zhihong): add fc grad without registering. -// TEST(AddBackwardOp, TestNoGradOp) { -// auto net = std::make_shared(); -// ASSERT_NE(net, nullptr); -// net->AddOp(framework::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Y"}, -// {})); auto grad_ops = AddBackwardOp(net); for (auto& op : grad_ops->ops_) { -// op->DebugString(); -// } -// } +//! TODO(yuyang18): Refine Backward Op. +// TEST(AddBackwardOp, TestGradOp) { +// auto net = std::make_shared(); +// ASSERT_NE(net, nullptr); +// net->AddOp(framework::OpRegistry::CreateOp("mul", {"X", "Y"}, {"Out"}, {})); +// net->AddOp( +// framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {"Out"}, {})); +// net->AddOp(framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {""}, +// {})); +// auto grad_ops = AddBackwardOp(net); +// for (auto& op : grad_ops->ops_) { +// op->DebugString(); +// } +//} } // namespace framework } // namespace paddle diff --git a/paddle/framework/net_proto.proto b/paddle/framework/net_proto.proto deleted file mode 100644 index 0779f49fe2..0000000000 --- a/paddle/framework/net_proto.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax="proto2"; -package paddle.framework; - -import "op_proto.proto"; - -message NetDesc { - // network identification - optional string name = 1; - // operator contains in network - repeated OpProto operators = 2; - // network type to run with. 
e.g "plainNet", "DAG" - optional string net_type = 3; - // num worker always - optional int32 num_workers = 4; -} diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index f59314f828..65fddb6811 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -90,15 +90,17 @@ class OperatorBase { virtual void Run(const std::shared_ptr& scope, const platform::DeviceContext& dev_ctx) const = 0; - // Get a input with argument's name described in `op_proto` + virtual bool IsNetOp() const { return false; } + + //! Get a input with argument's name described in `op_proto` const std::string& Input(const std::string& name) const; - // Get a input which has multiple variables. - // TODO add a vector_view to prevent memory copy. + //! Get a input which has multiple variables. + //! TODO add a vector_view to prevent memory copy. std::vector Inputs(const std::string& name) const; - // Get a output with argument's name described in `op_proto` + //! Get a output with argument's name described in `op_proto` const std::string& Output(const std::string& name) const; - // Get an output which has multiple variables. - // TODO add a vector_view to prevent memory copy. + //! Get an output which has multiple variables. + //! TODO add a vector_view to prevent memory copy. std::vector Outputs(const std::string& name) const; public: diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index 40ff2f41dd..c4a9f5937f 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -17,7 +17,7 @@ namespace paddle { namespace operators { -class FullyConnectedOp : public PlainNet { +class FullyConnectedOp : public NetOp { public: void Init() override { AddOp(OpRegistry::CreateOp("mul", diff --git a/paddle/operators/type_alias.h b/paddle/operators/type_alias.h index 44ffefb299..b712e457ff 100644 --- a/paddle/operators/type_alias.h +++ b/paddle/operators/type_alias.h @@ -43,7 +43,7 @@ using OpProto = framework::OpProto; using OpAttrChecker = framework::OpAttrChecker; using CPUPlace = platform::CPUPlace; using GPUPlace = platform::GPUPlace; -using PlainNet = framework::PlainNet; +using NetOp = framework::NetOp; using OpRegistry = framework::OpRegistry; } // namespace operators } // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index 0b152d03c0..ccefcd2511 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -146,22 +146,22 @@ All parameter, weight, gradient are variables in Paddle. 
}); ExposeOperator(operator_base); - using PlainNetPtr = std::shared_ptr; - py::class_ net(m, "Net"); + py::class_> net(m, "Net"); net.def_static("create", - []() -> std::shared_ptr { - auto retv = std::make_shared(); + []() -> std::shared_ptr { + auto retv = std::make_shared(); retv->type_ = "plain_net"; return retv; }) - .def("add_op", &pd::PlainNet::AddOp) + .def("add_op", &pd::NetOp::AddOp) .def("add_op", - [](PlainNetPtr& self, const PlainNetPtr& net) -> void { - self->AddOp(std::static_pointer_cast(net)); + [](pd::NetOp& self, const std::shared_ptr& net) -> void { + self.AddOp(std::static_pointer_cast(net)); }) - .def("complete_add_op", &pd::PlainNet::CompleteAddOp) - .def("complete_add_op", [](PlainNetPtr& self) { self->CompleteAddOp(); }); + .def("complete_add_op", &pd::NetOp::CompleteAddOp) + .def("complete_add_op", + [](std::shared_ptr& self) { self->CompleteAddOp(); }); ExposeOperator(net); m.def("unique_integer", UniqueIntegerGenerator); From ecf23ce5f339bb22800f2a9bc3c156ecc036c1cb Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 26 Jul 2017 16:04:53 +0800 Subject: [PATCH 02/58] Update Backward --- paddle/framework/CMakeLists.txt | 2 + paddle/framework/backward.cc | 93 +++++++++++++++++++++++++++++++++ paddle/framework/backward.h | 31 +++++++++++ paddle/framework/op_registry.h | 2 + 4 files changed, 128 insertions(+) create mode 100644 paddle/framework/backward.cc create mode 100644 paddle/framework/backward.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index a29a81c994..26d93336b1 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -31,3 +31,5 @@ add_dependencies(framework_py_proto framework_py_proto_init) cc_library(net SRCS net.cc DEPS op_registry) cc_test(net_op_test SRCS net_op_test.cc DEPS net add_op mul_op sigmoid_op softmax_op fc_op) + +cc_library(backward SRCS backward.cc DEPS net) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc new file mode 100644 index 0000000000..1169034218 --- /dev/null +++ b/paddle/framework/backward.cc @@ -0,0 +1,93 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include +#include + +namespace paddle { +namespace framework { + +static bool AllInSet(const std::vector& names, + const std::string& suffix, + const std::unordered_set& set) { + for (auto& name : names) { + if (set.find(name + suffix) == set.end()) { + return false; + } + } + return true; +} + +static std::vector InSetIdx(const std::vector& names, + const std::string& suffix, + const std::unordered_set& set) { + std::vector ret_val; + ret_val.reserve(names.size()); + for (size_t i = 0; i < names.size(); ++i) { + if (set.find(names[i] + suffix) != set.end()) { + ret_val.push_back(i); + } + } + return ret_val; +} + +static std::shared_ptr EmptyOp() { + auto net_op = std::make_shared(); + net_op->CompleteAddOp(); + return net_op; +} + +static std::shared_ptr BackwardImpl( + const OperatorBase& forwardOp, + std::unordered_set& no_grad_names, int& uniq_id) { + if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), + no_grad_names)) { + return EmptyOp(); + } + + if (AllInSet(forwardOp.outputs_, OperatorBase::GRAD_VAR_SUFFIX(), + no_grad_names)) { + for (auto& name : forwardOp.inputs_) { + // Mark all input is not need + no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); + } + return EmptyOp(); + } + + auto* net = new NetOp(); + + if (forwardOp.IsNetOp()) { + //! TODO(dzh) + } else { + //! TODO(fjy) + } + + net->CompleteAddOp(); + return std::shared_ptr(net); +} + +extern std::shared_ptr Backward( + const std::shared_ptr& forwardOp, + const std::unordered_set& no_grad_vars) { + std::unordered_set no_grad_names; + no_grad_names.reserve(no_grad_vars.size()); + + for (auto& name : no_grad_vars) { + no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); + } + int uid = 0; + return BackwardImpl(*forwardOp, no_grad_names, uid); +} +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h new file mode 100644 index 0000000000..e835ef6351 --- /dev/null +++ b/paddle/framework/backward.h @@ -0,0 +1,31 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once +#include +#include "operator.h" +namespace paddle { +namespace framework { + +/** + * @brief + * @param forwardOp + * @param no_grad_vars ignored input name of forward + * @return + */ +extern std::shared_ptr Backward( + const std::shared_ptr& forwardOp, + const std::unordered_set& no_grad_vars); +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index f16deae028..5bcd7ac927 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -302,6 +302,8 @@ class OpRegistry { static std::shared_ptr CreateGradOp( std::shared_ptr op) { + PADDLE_ENFORCE(!op->IsNetOp(), + "Use framework::Backward to get backward ops"); GradOpBuilder builder(op.get()); std::shared_ptr grad_op(builder.Build()); grad_op->Init(); From b1b13f8f5494b7ccd38f8b095c74d2d8172e2d9d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 26 Jul 2017 17:13:22 +0800 Subject: [PATCH 03/58] Update Interface --- paddle/framework/backward.cc | 12 ++++++------ paddle/framework/backward.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1169034218..d8653b5dd6 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -29,10 +29,10 @@ static bool AllInSet(const std::vector& names, return true; } -static std::vector InSetIdx(const std::vector& names, - const std::string& suffix, - const std::unordered_set& set) { - std::vector ret_val; +static std::vector InSetIdx( + const std::vector& names, const std::string& suffix, + const std::unordered_set& set) { + std::vector ret_val; ret_val.reserve(names.size()); for (size_t i = 0; i < names.size(); ++i) { if (set.find(names[i] + suffix) != set.end()) { @@ -78,7 +78,7 @@ static std::shared_ptr BackwardImpl( } extern std::shared_ptr Backward( - const std::shared_ptr& forwardOp, + const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars) { std::unordered_set no_grad_names; no_grad_names.reserve(no_grad_vars.size()); @@ -87,7 +87,7 @@ extern std::shared_ptr Backward( no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); } int uid = 0; - return BackwardImpl(*forwardOp, no_grad_names, uid); + return BackwardImpl(forwardOp, no_grad_names, uid); } } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index e835ef6351..d711c7bbb6 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -25,7 +25,7 @@ namespace framework { * @return */ extern std::shared_ptr Backward( - const std::shared_ptr& forwardOp, + const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); } // namespace framework } // namespace paddle From 00615ebca2217c9890b1e1212eba1f5d753aa92b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 26 Jul 2017 17:50:13 +0800 Subject: [PATCH 04/58] Refine OpRegistry::AddInput/AddOutput Remove bool argument, use a class to handle that. 
--- paddle/framework/CMakeLists.txt | 1 + paddle/framework/backward_test.cc | 50 +++++++++++++++++++++++ paddle/framework/op_registry.h | 61 +++++++++++++++------------- paddle/framework/op_registry_test.cc | 5 +-- paddle/framework/operator_test.cc | 4 +- paddle/operators/fc_op.cc | 4 +- 6 files changed, 89 insertions(+), 36 deletions(-) create mode 100644 paddle/framework/backward_test.cc diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 26d93336b1..66f516a963 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -33,3 +33,4 @@ cc_library(net SRCS net.cc DEPS op_registry) cc_test(net_op_test SRCS net_op_test.cc DEPS net add_op mul_op sigmoid_op softmax_op fc_op) cc_library(backward SRCS backward.cc DEPS net) +cc_test(backward_test SRCS backward_test.cc DEPS net) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc new file mode 100644 index 0000000000..b2286facfe --- /dev/null +++ b/paddle/framework/backward_test.cc @@ -0,0 +1,50 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include +#include "paddle/framework/op_registry.h" +namespace paddle { +namespace framework { + +class EmptyOp : public OperatorBase { + public: + void InferShape(const std::shared_ptr &scope) const override {} + void Run(const std::shared_ptr &scope, + const platform::DeviceContext &dev_ctx) const override {} +}; + +class RowwiseAddOp : public EmptyOp {}; +class RowwiseAddOpMaker : public OpProtoAndCheckerMaker { + public: + RowwiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input X of Add").IgnoreGradient(); + AddInput("b", "Bias of Add").IgnoreGradient(); + AddOutput("Out", "Out of Add").IgnoreGradient(); + AddComment("Add Op"); + } +}; + +class RowwiseAddGradOp : public EmptyOp {}; +} // namespace framework +} // namespace paddle + +namespace f = paddle::framework; +REGISTER_OP(rowwise_add, f::RowwiseAddOp, f::RowwiseAddOpMaker); +REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::RowwiseAddGradOp); + +TEST(Backward, simple_grad) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); + ASSERT_NE(fwd, nullptr); +} \ No newline at end of file diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 5bcd7ac927..e4ac8a6e76 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -86,43 +86,46 @@ class OpProtoAndCheckerMaker { } protected: - void AddInput(const std::string& name, const std::string& comment, - bool multiple = false, bool ignore_gradient = false) { + struct VariableBuilder { + VarProto* var_; + std::function on_multiple_; + std::function on_temporary_; + + VariableBuilder& SetMultiple() { + var_->set_multiple(true); + on_multiple_(); + return *this; + } + + VariableBuilder& SetTemporary() { + PADDLE_ENFORCE(bool(on_temporary_), "Cannot set temporary"); + var_->set_temporary(true); + 
on_temporary_(); + return *this; + } + + VariableBuilder& IgnoreGradient() { + var_->set_ignore_gradient(true); + return *this; + } + }; + + VariableBuilder AddInput(const std::string& name, + const std::string& comment) { auto input = proto_->mutable_inputs()->Add(); *input->mutable_name() = name; *input->mutable_comment() = comment; - input->set_ignore_gradient(ignore_gradient); - input->set_multiple(multiple); - if (multiple) { - SetHasMultipleInput(); - } - } - - void AddInputs(const std::string& name, const std::string& comment, - bool ignore_gradient = false) { - AddInput(name, comment, true, ignore_gradient); + return VariableBuilder{input, [=] { this->SetHasMultipleInput(); }, + nullptr}; } - void AddOutput(const std::string& name, const std::string& comment, - bool temporary = false, bool multiple = false, - bool ignore_gradient = false) { + VariableBuilder AddOutput(const std::string& name, + const std::string& comment) { auto output = proto_->mutable_outputs()->Add(); *output->mutable_name() = name; *output->mutable_comment() = comment; - output->set_ignore_gradient(ignore_gradient); - output->set_multiple(multiple); - if (multiple) { - SetHasMultipleOutput(); - } - output->set_temporary(temporary); - if (temporary) { - SetHasTemporaryOutput(); - } - } - - void AddOutputs(const std::string& name, const std::string& comment, - bool temporary = false, bool ignore_gradient = false) { - AddOutput(name, comment, temporary, true, ignore_gradient); + return VariableBuilder{output, [=] { this->SetHasMultipleOutput(); }, + [=] { this->SetHasTemporaryOutput(); }}; } template diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 2ef781bf86..a534f661af 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -36,9 +36,8 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInputs("input", "input of cosine op"); - AddOutput("output", "output of cosine op", - /*temporary*/ true); + AddInput("input", "input of cosine op").SetMultiple(); + AddOutput("output", "output of cosine op").SetTemporary(); auto my_checker = [](int i) { PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!"); }; diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 3fae356c3e..839280abbc 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -137,9 +137,9 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInputs("xs", "inputs of test op"); + AddInput("xs", "inputs of test op").SetMultiple(); AddInput("k", "input of test op"); - AddOutputs("ys", "outputs of test op"); + AddOutput("ys", "outputs of test op").SetMultiple(); AddAttr("scale", "scale of cosine op") .SetDefault(1.0) .LargerThan(0.0); diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index c4a9f5937f..71ceda9587 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -50,8 +50,8 @@ public: AddInput("b", "the bias of fc operator"); AddOutput("Y", "the output of fc operator"); - AddOutput( - "before_act", "the before activation output of fc operator", true); + AddOutput("before_act", "the before activation output of fc operator") + .SetTemporary(); AddAttr("activation", "The activation key for fc 
layer") .SetDefault("sigmoid") .InEnum({"sigmoid", "softmax"}); From a2dc9614edfff8ab2a602e1ed605ffdc4155373a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 26 Jul 2017 18:10:19 +0800 Subject: [PATCH 05/58] Add fill_zeros_like op --- paddle/operators/fill_zeros_like_op.cc | 58 ++++++++++++++++++++++++++ paddle/operators/fill_zeros_like_op.cu | 6 +++ paddle/operators/fill_zeros_like_op.h | 34 +++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 paddle/operators/fill_zeros_like_op.cc create mode 100644 paddle/operators/fill_zeros_like_op.cu create mode 100644 paddle/operators/fill_zeros_like_op.h diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc new file mode 100644 index 0000000000..3df3a2cfab --- /dev/null +++ b/paddle/operators/fill_zeros_like_op.cc @@ -0,0 +1,58 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/fill_zeros_like_op.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/tensor.h" + +namespace paddle { +namespace operators { + +class FillZerosLike : public framework::OperatorWithKernel { +protected: + void InferShape( + const std::vector &inputs, + const std::vector &outputs) const override { + PADDLE_ENFORCE(inputs.size() == 1, + "Input size of FillZerosLike must be one."); + PADDLE_ENFORCE(outputs.size() == 1, "Output size of AddOp must be one."); + PADDLE_ENFORCE(inputs[0] != nullptr && outputs[0] != nullptr, + "Outputs of FillZerosLike must all be set."); + outputs[0]->Resize(inputs[0]->dims()); + } +}; + +class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { +public: + FillZerosLikeOpMaker(framework::OpProto *proto, + framework::OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("Src", "The input of fill-zeros-like op."); + AddOutput("Dst", "The varibale will be filled up with zeros."); + AddComment(R"DOC( +Fill up a vriable with zeros. + +The output will have the same size with input. +)DOC") + } +}; +} // namespace operators +} // namespace paddle + +REGISTER_OP(fill_zeros_like, + paddle::operators::FillZerosLikeOp, + paddle::operators::FillZerosLikeOpMaker); +EGISTER_OP_CPU_KERNEL( + fill_zeros_like, + paddle::operators::FillZerosLikeKernal); \ No newline at end of file diff --git a/paddle/operators/fill_zeros_like_op.cu b/paddle/operators/fill_zeros_like_op.cu new file mode 100644 index 0000000000..55ad58f4f1 --- /dev/null +++ b/paddle/operators/fill_zeros_like_op.cu @@ -0,0 +1,6 @@ +#include "paddle/framework/op_registry.h" +#include "paddle/operators/fill_zeros_like_op.h" + +REGISTER_OP_GPU_KERNEL( + fill_zeros_like, + paddle::operators::FillZerosLikeKernel); \ No newline at end of file diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h new file mode 100644 index 0000000000..ca44a201f7 --- /dev/null +++ b/paddle/operators/fill_zeros_like_op.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "glog/logging.h" +#include "paddle/framework/eigen.h" +#include "paddle/framework/operator.h" + +namespace paddle { +namespace operators { + +template +class FillZerosLikeKernel : public framework::OpKernel { +public: + void Compute(const framework::KernelContext& context) const override { + auto* output = context.Output(0)->GetMutable(); + output->mutable_data(context.GetPlace()); + framework::EigenVector::Flatten(*output).setZero(); + } +}; + +} // namespace operators +} // namespace paddle From e32e306821fc8ffd79ccbe6f9c090d1ad217fd56 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 26 Jul 2017 19:37:10 +0800 Subject: [PATCH 06/58] Develop backward building precess of single op --- paddle/framework/backward.cc | 23 +++++++++++++++++++++-- paddle/framework/operator.h | 3 +++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index d8653b5dd6..1531cb53f9 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -12,8 +12,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include -#include +#include "paddle/framework/backward.h" +#include "paddle/framework/net.h" +#include "paddle/framework/op_registry.h" namespace paddle { namespace framework { @@ -71,6 +72,24 @@ static std::shared_ptr BackwardImpl( //! TODO(dzh) } else { //! TODO(fjy) + std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); + for (std::string& grad_input : grad_op->inputs_) { + if (no_grad_names.count(grad_input)) { + std::string prefix = grad_input.substr( + 0, grad_input.size() - OperatorBase::GRAD_VAR_SUFFIX().size()); + grad_input = prefix + OperatorBase::ZERO_VAR_SUFFIX(); + std::vector fill_zeros_in = {prefix}; + std::vector fill_zeros_out = {grad_input}; + net.AddOp(OpRegistry::CreateOp("fill_zeros_like", fill_zeros_in, + fill_zeros_out, AttributeMap())); + } + } + for (std::string& grad_output : grad_op->output_) { + if (no_grad_names.count(grad_output)) { + grad_output = OperatorBase::EMPTY_VAR_NAME(); + } + } + net.AddOp(grad_op); } net->CompleteAddOp(); diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 65fddb6811..c2cd21a080 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -67,6 +67,9 @@ class OperatorBase { /// e.g. Variable "x@GRAD" is the gradient of varibale "x". static std::string GRAD_VAR_SUFFIX() { return "@GRAD"; } + /// Variables with this suffix are supposed to be filled up with zeros. 
+ static std::string ZERO_VAR_SUFFIX() { return "@ZERO"; } + virtual ~OperatorBase() {} template From 831d4e1c85dedc2bca8cc997ccc612208dc05c38 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 26 Jul 2017 19:37:40 +0800 Subject: [PATCH 07/58] Refining Unittest --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/backward_test.cc | 142 ++++++++++++++++++++++- paddle/framework/grad_op_builder.cc | 19 ++- paddle/framework/grad_op_builder.h | 4 +- paddle/framework/grad_op_builder_test.cc | 2 +- paddle/framework/op_registry.h | 7 +- 6 files changed, 152 insertions(+), 24 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 66f516a963..7febaaa527 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -33,4 +33,4 @@ cc_library(net SRCS net.cc DEPS op_registry) cc_test(net_op_test SRCS net_op_test.cc DEPS net add_op mul_op sigmoid_op softmax_op fc_op) cc_library(backward SRCS backward.cc DEPS net) -cc_test(backward_test SRCS backward_test.cc DEPS net) +cc_test(backward_test SRCS backward_test.cc DEPS backward) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index b2286facfe..cc00279db5 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -12,8 +12,11 @@ See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/framework/backward.h" #include +#include "paddle/framework/net.h" #include "paddle/framework/op_registry.h" + namespace paddle { namespace framework { @@ -24,10 +27,9 @@ class EmptyOp : public OperatorBase { const platform::DeviceContext &dev_ctx) const override {} }; -class RowwiseAddOp : public EmptyOp {}; -class RowwiseAddOpMaker : public OpProtoAndCheckerMaker { +class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: - RowwiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "Input X of Add").IgnoreGradient(); AddInput("b", "Bias of Add").IgnoreGradient(); @@ -36,15 +38,143 @@ class RowwiseAddOpMaker : public OpProtoAndCheckerMaker { } }; -class RowwiseAddGradOp : public EmptyOp {}; +class MulOpMaker : public OpProtoAndCheckerMaker { + public: + MulOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("A", "A"); + AddInput("B", "B"); + AddOutput("Out", "Out"); + AddComment("Mul"); + } +}; + +class SigmoidOpMaker : public OpProtoAndCheckerMaker { + public: + SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "X"); + AddOutput("Y", "Y"); + AddComment("Sigmoid"); + } +}; + +class FcOp : public NetOp { + public: + void Init() override { + AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, + {Output("before_act")}, {})); + auto b_name = Input("b"); + if (b_name != EMPTY_VAR_NAME()) { + AddOp(OpRegistry::CreateOp("rowwise_add", {Output("before_act"), b_name}, + {Output("before_act")}, {})); + } + AddOp(OpRegistry::CreateOp("sigmoid", {Output("before_act")}, + {Output("Out")}, {})); + CompleteAddOp(false); + } +}; + +class FcOpMaker : public OpProtoAndCheckerMaker { + public: + FcOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "x"); + AddInput("W", "w"); + AddInput("b", "b"); + AddOutput("before_act", "before act").SetTemporary(); + 
AddOutput("Out", ""); + AddComment(""); + } +}; + +class ManyOutputOpMaker : public OpProtoAndCheckerMaker { + public: + ManyOutputOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("x", "x"); + AddOutput("y", "y"); + AddOutput("z", "z"); + AddComment(""); + } +}; + +class FillZeroOpMaker : public OpProtoAndCheckerMaker { + public: + FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("x", "x"); + AddOutput("out", "out"); + AddComment(""); + } +}; } // namespace framework } // namespace paddle namespace f = paddle::framework; -REGISTER_OP(rowwise_add, f::RowwiseAddOp, f::RowwiseAddOpMaker); -REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::RowwiseAddGradOp); +using EnforceNotMet = paddle::platform::EnforceNotMet; +REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker); +REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp); +REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker); +REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp); +REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker); +REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp); +REGISTER_OP(fc, f::FcOp, f::FcOpMaker); +REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker); +REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp); +REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); TEST(Backward, simple_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); + auto gop = f::OpRegistry::CreateGradOp(*fwd); + ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); + ASSERT_EQ("rowwise_add_grad", gop->type_); + ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]); + ASSERT_EQ("b" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[1]); + + // LOG(INFO) << gop->Output("X" + "@GRAD"); +} + +TEST(Backward, not_for_network) { + auto fwd = + f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, + {{"temporary_index", std::vector{1}}}); + ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); +} + +TEST(Backward, all_input_are_not_need) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); + auto backward = f::Backward(*fwd, {"X", "b"}); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_TRUE(net->ops_.empty()); +} + +TEST(Backward, all_output_are_not_need) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); + auto backward = f::Backward(*fwd, {"Out"}); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_TRUE(net->ops_.empty()); +} + +TEST(Backward, part_of_output_are_not_need) { + auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {}); + auto backward = f::Backward(*fwd, {"Z"}); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_EQ(net->ops_.size(), 2); + + auto &fill_zero = *net->ops_[0]; + ASSERT_EQ("fill_zeros_like", fill_zero.type_); + ASSERT_EQ(1, fill_zero.inputs_.size()); + ASSERT_EQ("Z", fill_zero.inputs_[0]); + ASSERT_EQ(1, fill_zero.outputs_.size()); + ASSERT_EQ("Z@ZERO", fill_zero.outputs_[0]); + + auto &d_many_out = *net->ops_[1]; + ASSERT_EQ("many_output_op_grad", d_many_out.type_); + ASSERT_EQ(1 + 2 + 2, d_many_out.inputs_.size()); // I/O/OG + ASSERT_EQ("Z@ZERO", d_many_out.Input("z@GRAD")); } \ No newline at end of file diff --git a/paddle/framework/grad_op_builder.cc 
b/paddle/framework/grad_op_builder.cc index 6235be75f2..dd686cc782 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -20,7 +20,7 @@ namespace framework { OperatorBase* GradOpBuilder::Build() { BuildOpInOutArgList(); - std::string grad_op_type = OpRegistry::grad_ops().at(op_->type_); + std::string grad_op_type = OpRegistry::grad_ops().at(op_.type_); OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)(); grad_op->type_ = grad_op_type; CompleteGradOp(grad_op); @@ -39,15 +39,15 @@ OpInOutArg* GradOpBuilder::BuildArg(const VarProto& var, } void GradOpBuilder::BuildOpInOutArgList() { - const OpProto& op_proto = OpRegistry::protos().at(op_->type_); - const auto& var_map = *(OpRegistry::VarIndexMaps().at(op_->type_)); + const OpProto& op_proto = OpRegistry::protos().at(op_.type_); + const auto& var_map = *(OpRegistry::VarIndexMaps().at(op_.type_)); const std::vector& in_format = - op_->attrs_.count("input_format") - ? op_->GetAttr>("input_format") + op_.attrs_.count("input_format") + ? op_.GetAttr>("input_format") : std::vector(); const std::vector& out_format = - op_->attrs_.count("output_format") - ? op_->GetAttr>("output_format") + op_.attrs_.count("output_format") + ? op_.GetAttr>("output_format") : std::vector(); for (const auto& var : op_proto.inputs()) { arg_list_.emplace_back( @@ -70,8 +70,7 @@ void GradOpBuilder::AddArgIntoGradOp(const OpInOutArg* arg, } (*varmap)[var_name] = idx++; size_t pre_sz = in_out.size(); - auto base_it = - arg->type_ == IN ? op_->inputs_.begin() : op_->outputs_.begin(); + auto base_it = arg->type_ == IN ? op_.inputs_.begin() : op_.outputs_.begin(); std::copy(base_it + arg->begin_idx_, base_it + arg->end_idx_, std::back_inserter(in_out)); if (is_grad) { @@ -83,7 +82,7 @@ void GradOpBuilder::AddArgIntoGradOp(const OpInOutArg* arg, } void GradOpBuilder::CompleteGradOp(OperatorBase* grad_op) const { - grad_op->attrs_ = op_->attrs_; + grad_op->attrs_ = op_.attrs_; grad_op->attrs_.erase("input_format"); grad_op->attrs_.erase("output_format"); VarIndexMap* grad_varmap = new VarIndexMap(); diff --git a/paddle/framework/grad_op_builder.h b/paddle/framework/grad_op_builder.h index 2ecf39479b..cc7a76f372 100644 --- a/paddle/framework/grad_op_builder.h +++ b/paddle/framework/grad_op_builder.h @@ -29,7 +29,7 @@ class GradOpBuilder { using VarIndexMap = std::unordered_map; public: - GradOpBuilder(const OperatorBase* op) : op_(op) {} + GradOpBuilder(const OperatorBase& op) : op_(op) {} OperatorBase* Build(); private: @@ -40,7 +40,7 @@ class GradOpBuilder { std::vector& format, VarIndexMap* varmap, int& idx, bool is_grad) const; void CompleteGradOp(OperatorBase* grad_op) const; - const OperatorBase* op_; + const OperatorBase& op_; std::vector> arg_list_; }; diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 288a7841cd..e9cf3b9798 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -11,7 +11,7 @@ namespace framework { TEST(GradOpBuilder, AddTwo) { std::shared_ptr add_op( OpRegistry::CreateOp("add_two", {"x", "y"}, {"out"}, {})); - std::shared_ptr grad_add_op = OpRegistry::CreateGradOp(add_op); + std::shared_ptr grad_add_op = OpRegistry::CreateGradOp(*add_op); EXPECT_EQ(static_cast(grad_add_op->inputs_.size()), 4); EXPECT_EQ(static_cast(grad_add_op->outputs_.size()), 2); EXPECT_EQ(grad_add_op->Input("X"), "x"); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index e4ac8a6e76..cee20b1112 
100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -303,11 +303,10 @@ class OpRegistry { return CreateOp(op_desc.type(), inputs, outputs, attrs); } - static std::shared_ptr CreateGradOp( - std::shared_ptr op) { - PADDLE_ENFORCE(!op->IsNetOp(), + static std::shared_ptr CreateGradOp(const OperatorBase& op) { + PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); - GradOpBuilder builder(op.get()); + GradOpBuilder builder(op); std::shared_ptr grad_op(builder.Build()); grad_op->Init(); return grad_op; From fa7cbfdeecfc50afb45cea01fbdc6a159e597651 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Wed, 26 Jul 2017 21:04:30 +0800 Subject: [PATCH 08/58] "backward is NetOp" --- paddle/framework/backward.cc | 40 +++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index d8653b5dd6..5b35de77e4 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -48,9 +48,11 @@ static std::shared_ptr EmptyOp() { return net_op; } -static std::shared_ptr BackwardImpl( - const OperatorBase& forwardOp, - std::unordered_set& no_grad_names, int& uniq_id) { +static void DeDuplicate(NetOp* net, std::unordered_se) + + static std::shared_ptr BackwardImpl( + const OperatorBase& forwardOp, + std::unordered_set& no_grad_names, unsigned& uniq_id) { if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { return EmptyOp(); @@ -68,6 +70,38 @@ static std::shared_ptr BackwardImpl( auto* net = new NetOp(); if (forwardOp.IsNetOp()) { + std::unordered_map dup_output; + std::unordered_map> dup_output_ops; + const unsigned uniq_id_local = uniq_id; + unsigned op_id_offset = 0; + for (auto& fwd : forwardOp) { + auto bwd = Backward(fwd, no_grad_names); + net->AddOp(bwd); + for (size_t i = 0; i < bwd.outputs_; ++i) { + bwd->outputs_[i] += OperatorBase::EMPTY_VAR_NAME(); + if (dup_output.find(bwd->inputs_[i]) == dup_output.end()) { + dup_output[bwd->inputs_[i]] = 1; + dup_output_ops[bwd->inputs_[i]] = std::vector{op_id_offset++}; + } else { + dup_output[bwd->inputs_[i]]++; + dup_output_ops[bwd->inputs_[i]].emplace_back(op_id_offset++); + } + } + } + for (auto dup : dup_output) { + if (dup.second == 1) continue; + auto op_ids = dup_output_ops.at(dup.first); + for (auto& op_id : op_ids) { + auto& op_ptr = net->ops_[op_id]; + for (size_t i = 0; i < op_ptr->inputs_.size(); ++i) { + if (op_ptr->inputs_[i] == dup.first) { + // unique the duplicate name + op_ptr->inputs_[i] += std::to_string(uniq_id++); + } + } + } + } + //! TODO(dzh) } else { //! TODO(fjy) From 292f2ab1a56451d932be1f5259e3d5ee2e0b36ec Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Wed, 26 Jul 2017 21:06:37 +0800 Subject: [PATCH 09/58] "split to generic add PR" --- paddle/framework/backward.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c14249269b..a4660d7156 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -71,6 +71,7 @@ static void DeDuplicate(NetOp* net, std::unordered_se) auto* net = new NetOp(); if (forwardOp.IsNetOp()) { + //! 
TODO(dzh) std::unordered_map dup_output; std::unordered_map> dup_output_ops; const unsigned uniq_id_local = uniq_id; @@ -98,12 +99,12 @@ static void DeDuplicate(NetOp* net, std::unordered_se) if (op_ptr->inputs_[i] == dup.first) { // unique the duplicate name op_ptr->inputs_[i] += std::to_string(uniq_id++); + // TODO(dzh): need a generic add op here } } } } - //! TODO(dzh) } else { //! TODO(fjy) std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); From 05d9afff1ce46342da759b7f4964b6a99bad10b9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 10:06:45 +0800 Subject: [PATCH 10/58] Stash --- paddle/framework/backward_test.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index cc00279db5..404adb4f37 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -133,6 +133,8 @@ TEST(Backward, simple_grad) { ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]); ASSERT_EQ("b" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[1]); + ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), + gop->Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); // LOG(INFO) << gop->Output("X" + "@GRAD"); } From 03f418c5d1a4b2a1fc65867c3dec7306aaec706e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 27 Jul 2017 10:38:26 +0800 Subject: [PATCH 11/58] Fix compile error --- paddle/framework/backward.cc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1531cb53f9..db4af8c712 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -78,18 +78,16 @@ static std::shared_ptr BackwardImpl( std::string prefix = grad_input.substr( 0, grad_input.size() - OperatorBase::GRAD_VAR_SUFFIX().size()); grad_input = prefix + OperatorBase::ZERO_VAR_SUFFIX(); - std::vector fill_zeros_in = {prefix}; - std::vector fill_zeros_out = {grad_input}; - net.AddOp(OpRegistry::CreateOp("fill_zeros_like", fill_zeros_in, - fill_zeros_out, AttributeMap())); + net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix}, + {grad_input}, {})); } } - for (std::string& grad_output : grad_op->output_) { + for (std::string& grad_output : grad_op->outputs_) { if (no_grad_names.count(grad_output)) { grad_output = OperatorBase::EMPTY_VAR_NAME(); } } - net.AddOp(grad_op); + net->AddOp(grad_op); } net->CompleteAddOp(); From f9fab14c4e059cfd5ce871a381e5128ee14e2039 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 10:40:54 +0800 Subject: [PATCH 12/58] Fix compile error --- paddle/framework/backward.cc | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 7b470adb47..dae457f858 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -49,11 +49,9 @@ static std::shared_ptr EmptyOp() { return net_op; } -static void DeDuplicate(NetOp* net, std::unordered_se) - - static std::shared_ptr BackwardImpl( - const OperatorBase& forwardOp, - std::unordered_set& no_grad_names, unsigned& uniq_id) { +static std::shared_ptr BackwardImpl( + const OperatorBase& forwardOp, + std::unordered_set& no_grad_names, size_t& uniq_id) { if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { return EmptyOp(); @@ -73,13 +71,16 @@ static void DeDuplicate(NetOp* net, std::unordered_se) if (forwardOp.IsNetOp()) { //! 
TODO(dzh) std::unordered_map dup_output; - std::unordered_map> dup_output_ops; - const unsigned uniq_id_local = uniq_id; - unsigned op_id_offset = 0; - for (auto& fwd : forwardOp) { - auto bwd = Backward(fwd, no_grad_names); + std::unordered_map> dup_output_ops; + // const unsigned uniq_id_local = uniq_id; + int op_id_offset = 0; + // Because it is a net op, it can static_cast. + auto& forwardNet = static_cast(forwardOp); + + for (auto& fwd : forwardNet.ops_) { + auto bwd = Backward(*fwd, no_grad_names); net->AddOp(bwd); - for (size_t i = 0; i < bwd.outputs_; ++i) { + for (size_t i = 0; i < bwd->outputs_.size(); ++i) { bwd->outputs_[i] += OperatorBase::EMPTY_VAR_NAME(); if (dup_output.find(bwd->inputs_[i]) == dup_output.end()) { dup_output[bwd->inputs_[i]] = 1; @@ -138,7 +139,7 @@ extern std::shared_ptr Backward( for (auto& name : no_grad_vars) { no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); } - int uid = 0; + size_t uid = 0; return BackwardImpl(forwardOp, no_grad_names, uid); } } // namespace framework From 3d18737b84181e59190c56c0e91d2a057ce8c0db Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 10:44:41 +0800 Subject: [PATCH 13/58] Add unittest for part_of_output_are_not_need --- paddle/framework/backward_test.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 404adb4f37..dd0d2be668 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -170,13 +170,18 @@ TEST(Backward, part_of_output_are_not_need) { auto &fill_zero = *net->ops_[0]; ASSERT_EQ("fill_zeros_like", fill_zero.type_); - ASSERT_EQ(1, fill_zero.inputs_.size()); + ASSERT_EQ(1UL, fill_zero.inputs_.size()); ASSERT_EQ("Z", fill_zero.inputs_[0]); - ASSERT_EQ(1, fill_zero.outputs_.size()); - ASSERT_EQ("Z@ZERO", fill_zero.outputs_[0]); + ASSERT_EQ(1UL, fill_zero.outputs_.size()); + ASSERT_EQ("Z" + f::OperatorBase::ZERO_VAR_SUFFIX(), fill_zero.outputs_[0]); auto &d_many_out = *net->ops_[1]; ASSERT_EQ("many_output_op_grad", d_many_out.type_); - ASSERT_EQ(1 + 2 + 2, d_many_out.inputs_.size()); // I/O/OG - ASSERT_EQ("Z@ZERO", d_many_out.Input("z@GRAD")); + ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size()); // I/O/OG + ASSERT_EQ("Z" + f::OperatorBase::ZERO_VAR_SUFFIX(), + d_many_out.Input("z" + f::OperatorBase::GRAD_VAR_SUFFIX())); + ASSERT_EQ("Y" + f::OperatorBase::GRAD_VAR_SUFFIX(), + d_many_out.Input("y" + f::OperatorBase::GRAD_VAR_SUFFIX())); + ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), + d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); } \ No newline at end of file From 70bd07a0e1260542a14ff8845e124108a44520b4 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 27 Jul 2017 11:26:41 +0800 Subject: [PATCH 14/58] Fix compile errors of FillZerosLikeOp --- paddle/operators/CMakeLists.txt | 1 + paddle/operators/fill_zeros_like_op.cc | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 0a14dc2114..644460ee47 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -49,6 +49,7 @@ op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc) op_library(sigmoid_op SRCS sigmoid_op.cu sigmoid_op.cc) op_library(softmax_op SRCS softmax_op.cc softmax_op.cu) op_library(cross_entropy_op SRCS cross_entropy_op.cc cross_entropy_op.cu) +op_library(fill_zeros_like_op SRCS fill_zeros_like_op.cc fill_zeros_like_op.cu) 
op_library(fc_op SRCS fc_op.cc DEPS mul_op rowwise_add_op sigmoid_op softmax_op net) diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index 3df3a2cfab..d641bc4ada 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -19,16 +19,16 @@ limitations under the License. */ namespace paddle { namespace operators { -class FillZerosLike : public framework::OperatorWithKernel { +class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape( const std::vector &inputs, const std::vector &outputs) const override { PADDLE_ENFORCE(inputs.size() == 1, - "Input size of FillZerosLike must be one."); + "Input size of FillZerosLikeOp must be one."); PADDLE_ENFORCE(outputs.size() == 1, "Output size of AddOp must be one."); PADDLE_ENFORCE(inputs[0] != nullptr && outputs[0] != nullptr, - "Outputs of FillZerosLike must all be set."); + "Outputs of FillZerosLikeOp must all be set."); outputs[0]->Resize(inputs[0]->dims()); } }; @@ -44,7 +44,7 @@ public: Fill up a vriable with zeros. The output will have the same size with input. -)DOC") +)DOC"); } }; } // namespace operators @@ -53,6 +53,6 @@ The output will have the same size with input. REGISTER_OP(fill_zeros_like, paddle::operators::FillZerosLikeOp, paddle::operators::FillZerosLikeOpMaker); -EGISTER_OP_CPU_KERNEL( +REGISTER_OP_CPU_KERNEL( fill_zeros_like, - paddle::operators::FillZerosLikeKernal); \ No newline at end of file + paddle::operators::FillZerosLikeKernel); From 63636d69e6c588b06ea01db9522df35bd0ca6636 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 13:25:32 +0800 Subject: [PATCH 15/58] Stash for canpio --- paddle/framework/backward_test.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index dd0d2be668..609dc661f2 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -184,4 +184,17 @@ TEST(Backward, part_of_output_are_not_need) { d_many_out.Input("y" + f::OperatorBase::GRAD_VAR_SUFFIX())); ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); +} + +TEST(Backward, part_of_input_are_not_need) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); + auto backward = f::Backward(*fwd, {"X"}); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_EQ(1UL, net->ops_.size()); + + auto &d_add = *net->ops_[0]; + ASSERT_EQ("rowwise_add_grad", d_add.type_); + ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), + d_add.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); } \ No newline at end of file From 04db4183e975ed3b2d07a57984dd5edf4a8adcb0 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 27 Jul 2017 14:26:17 +0800 Subject: [PATCH 16/58] Add unitest of Backward.part_of_input_are_not_need --- paddle/framework/backward_test.cc | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index dd0d2be668..878d3010de 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -166,7 +166,7 @@ TEST(Backward, part_of_output_are_not_need) { auto backward = f::Backward(*fwd, {"Z"}); ASSERT_TRUE(backward->IsNetOp()); auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 2); + ASSERT_EQ(net->ops_.size(), 2UL); auto &fill_zero = *net->ops_[0]; 
ASSERT_EQ("fill_zeros_like", fill_zero.type_); @@ -184,4 +184,23 @@ TEST(Backward, part_of_output_are_not_need) { d_many_out.Input("y" + f::OperatorBase::GRAD_VAR_SUFFIX())); ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); +} + +TEST(Backward, part_of_input_are_not_need) { + auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); + auto backward = f::Backward(*fwd, {"a"}); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_EQ(net->ops_.size(), 1UL); + + auto &grad_mul = *net->ops_[0]; + ASSERT_EQ(grad_mul.type_, "mul_grad"); + ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); + ASSERT_EQ(grad_mul.outputs_.size(), 2UL); + ASSERT_EQ(grad_mul.Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX()), + f::OperatorBase::EMPTY_VAR_NAME()); + ASSERT_EQ(grad_mul.Output("B" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "b" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_mul.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "out" + f::OperatorBase::GRAD_VAR_SUFFIX()); } \ No newline at end of file From 28c0281d9e8854f1cb8a9d89d6bf472a8d07a87d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 14:50:03 +0800 Subject: [PATCH 17/58] Stash --- paddle/framework/backward_test.cc | 64 ++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 609dc661f2..6ab81b5589 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -128,6 +128,7 @@ TEST(Backward, simple_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); + ASSERT_EQ(1, gop->inputs_.size()); ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->type_); ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]); @@ -138,6 +139,67 @@ TEST(Backward, simple_grad) { // LOG(INFO) << gop->Output("X" + "@GRAD"); } +TEST(Backward, fc_backward_normal) { + std::shared_ptr fwd = + f::OpRegistry::CreateOp("fc", {"X", "w", "b"}, {"out"}, {}); + ASSERT_NE(fwd, nullptr); + std::shared_ptr gop = f::Backward(*fwd, {}); + ASSERT_TRUE(gop->IsNetOp()); + auto net = static_cast(gop.get()); + + ASSERT_NO_THROW(net->DebugString()); + + ASSERT_EQ(3UL, net->ops_.size()); + + f::OperatorBase &d_sigmoid = *net->ops_[0]; + ASSERT_EQ("sigmoid_grad", d_sigmoid.type_); + + f::OperatorBase &d_add = *net->ops_[1]; + ASSERT_EQ("rowwise_add_grad", d_add.type_); + + f::OperatorBase &d_mul = *net->ops_[2]; + ASSERT_EQ("mul_grad", d_mul.type_); +} + +TEST(Backward, fc_backward_not_have_b) { + std::shared_ptr fwd = f::OpRegistry::CreateOp( + "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()}, {"out"}, {}); + ASSERT_NE(fwd, nullptr); + std::shared_ptr gop = f::Backward(*fwd, {}); + ASSERT_TRUE(gop->IsNetOp()); + auto net = static_cast(gop.get()); + + ASSERT_NO_THROW(net->DebugString()); + + ASSERT_EQ(2UL, net->ops_.size()); + + f::OperatorBase &d_sigmoid = *net->ops_[0]; + ASSERT_EQ("sigmoid_grad", d_sigmoid.type_); + + f::OperatorBase &d_mul = *net->ops_[1]; + ASSERT_EQ("mul_grad", d_mul.type_); +} + +TEST(Backward, input_layer_not_need_grad) { + f::NetOp net; + net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, {"hidden0"}, {})); + net.AddOp( + f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, {"hidden1"}, {})); + + auto bwd = Backward(net, {"X"}); 
// X@GRAD is not need. + ASSERT_TRUE(bwd->IsNetOp()); + auto bwd_net = static_cast(bwd.get()); + + std::unordered_set all_output = std::unordered_set( + bwd_net->outputs_.begin(), bwd_net->outputs_.end()); + all_output.erase(f::OperatorBase::EMPTY_VAR_NAME()); + + for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { + ASSERT_NE(all_output.find(out + f::OperatorBase::GRAD_VAR_SUFFIX()), + all_output.end()); + } +} + TEST(Backward, not_for_network) { auto fwd = f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, @@ -166,7 +228,7 @@ TEST(Backward, part_of_output_are_not_need) { auto backward = f::Backward(*fwd, {"Z"}); ASSERT_TRUE(backward->IsNetOp()); auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 2); + ASSERT_EQ(net->ops_.size(), 2UL); auto &fill_zero = *net->ops_[0]; ASSERT_EQ("fill_zeros_like", fill_zero.type_); From 3dd5fd047b16f02b982bfc6b0275a13ddf1b3b11 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 27 Jul 2017 16:01:16 +0800 Subject: [PATCH 18/58] Add unitest of Backward.intermediate_variable_not_need_in_linear_net --- paddle/framework/backward_test.cc | 34 ++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index e920af3d1a..538522bf44 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -128,7 +128,7 @@ TEST(Backward, simple_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); - ASSERT_EQ(1, gop->inputs_.size()); + ASSERT_EQ(1UL, gop->inputs_.size()); ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->type_); ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]); @@ -265,4 +265,36 @@ TEST(Backward, part_of_input_are_not_need) { "b" + f::OperatorBase::GRAD_VAR_SUFFIX()); ASSERT_EQ(grad_mul.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), "out" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_mul.Input("A"), "a"); + ASSERT_EQ(grad_mul.Input("B"), "b"); + ASSERT_EQ(grad_mul.Input("Out"), "out"); +} + +TEST(Backward, intermediate_variable_not_need_in_linear_net) { + f::NetOp net; + net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, {"out1"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, {"out2"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"out3"}, {})); + net.CompleteAddOp(false); + auto backward = f::Backward(net, {"out2"}); + ASSERT_TRUE(backward->IsNetOp()); + auto bwd_net = static_cast(backward.get()); + ASSERT_EQ(bwd_net->ops_.size(), 1UL); + + auto &grad_fc = *bwd_net->ops_[0]; + ASSERT_EQ(grad_fc.type_, "fc_grad"); + ASSERT_EQ(grad_fc.inputs_.size(), 3UL + 1UL + 1UL); + ASSERT_EQ(grad_fc.outputs_.size(), 3UL); + ASSERT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), + f::OperatorBase::EMPTY_VAR_NAME()); + ASSERT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "w3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "b3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Input("X"), "out2"); + ASSERT_EQ(grad_fc.Input("W"), "w3"); + ASSERT_EQ(grad_fc.Input("b"), "b3"); + ASSERT_EQ(grad_fc.Input("Out"), "out3"); } \ No newline at end of file 
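A minimal usage sketch of the API these tests drive, distilled from the test code in this series rather than copied from any one patch; the template arguments, which this plain-text rendering of the diffs has dropped, are assumed to be std::shared_ptr<OperatorBase> for the created ops and f::NetOp * for the casts, as in the original sources.

    // Forward op: out = a * b. Build its backward pass while declaring that the
    // gradient of "a" is not wanted.
    auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
    auto backward = f::Backward(*fwd, {"a"});
    // Depending on the revision in this series, the result is either the bare
    // "mul_grad" op or a NetOp wrapping it; either way the suppressed gradient
    // is reported as EMPTY_VAR_NAME() and the wanted one keeps the @GRAD suffix.
    auto &grad_mul = backward->IsNetOp()
                         ? *static_cast<f::NetOp *>(backward.get())->ops_[0]
                         : *backward;
    // grad_mul.Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX())
    //     == f::OperatorBase::EMPTY_VAR_NAME()
    // grad_mul.Output("B" + f::OperatorBase::GRAD_VAR_SUFFIX())
    //     == "b" + f::OperatorBase::GRAD_VAR_SUFFIX()

The patches that follow mostly iterate on these tests and on the duplicate-gradient handling inside backward.cc.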
From 84198f75483aa9b7718c71d3bafa3372f73aef5a Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 16:06:43 +0800 Subject: [PATCH 19/58] Add unittest --- paddle/framework/backward_test.cc | 58 +++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 11 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index e920af3d1a..81a55a42b4 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -108,6 +108,16 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker { AddComment(""); } }; + +class AddOpMaker : public OpProtoAndCheckerMaker { + public: + AddOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "x").SetMultiple(); + AddOutput("Y", "y"); + AddComment(""); + } +}; } // namespace framework } // namespace paddle @@ -123,12 +133,14 @@ REGISTER_OP(fc, f::FcOp, f::FcOpMaker); REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker); REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp); REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); +REGISTER_OP(add, f::EmptyOp, f::AddOpMaker); +REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp); -TEST(Backward, simple_grad) { +TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); - ASSERT_EQ(1, gop->inputs_.size()); + ASSERT_EQ(1UL, gop->inputs_.size()); ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->type_); ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->outputs_[0]); @@ -139,7 +151,7 @@ TEST(Backward, simple_grad) { // LOG(INFO) << gop->Output("X" + "@GRAD"); } -TEST(Backward, fc_backward_normal) { +TEST(Backward, net_fc_backward_normal) { std::shared_ptr fwd = f::OpRegistry::CreateOp("fc", {"X", "w", "b"}, {"out"}, {}); ASSERT_NE(fwd, nullptr); @@ -161,7 +173,7 @@ TEST(Backward, fc_backward_normal) { ASSERT_EQ("mul_grad", d_mul.type_); } -TEST(Backward, fc_backward_not_have_b) { +TEST(Backward, net_fc_backward_not_have_b) { std::shared_ptr fwd = f::OpRegistry::CreateOp( "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()}, {"out"}, {}); ASSERT_NE(fwd, nullptr); @@ -180,12 +192,12 @@ TEST(Backward, fc_backward_not_have_b) { ASSERT_EQ("mul_grad", d_mul.type_); } -TEST(Backward, input_layer_not_need_grad) { +TEST(Backward, net_input_of_network_not_need_grad) { f::NetOp net; net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, {"hidden0"}, {})); net.AddOp( f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, {"hidden1"}, {})); - + net.CompleteAddOp(); auto bwd = Backward(net, {"X"}); // X@GRAD is not need. 
ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); @@ -198,16 +210,40 @@ TEST(Backward, input_layer_not_need_grad) { ASSERT_NE(all_output.find(out + f::OperatorBase::GRAD_VAR_SUFFIX()), all_output.end()); } + + // Not Generated X + ASSERT_EQ(all_output.find("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), + all_output.end()); + + ASSERT_EQ(2, bwd_net->ops_.size()); + ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); + auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); + ASSERT_EQ(3, first_fc_grad->ops_.size()); + ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), + first_fc_grad[2].Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); +} + +TEST(Backward, net_shared_weight) { + f::NetOp net; + net.AddOp(f::OpRegistry::CreateOp("mul", {"X", "W"}, {"Out"}, {})); + net.AddOp(f::OpRegistry::CreateOp("mul", {"Out", "W"}, {"FinalOut"}, {})); + net.CompleteAddOp(); + + auto bwd = f::Backward(net, {}); + ASSERT_TRUE(bwd->IsNetOp()); + auto bwd_net = static_cast(bwd.get()); + ASSERT_EQ(3UL, bwd_net->ops_.size()); + ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); } -TEST(Backward, not_for_network) { +TEST(Backward, op_register_grad_not_for_network) { auto fwd = f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, {{"temporary_index", std::vector{1}}}); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); } -TEST(Backward, all_input_are_not_need) { +TEST(Backward, op_all_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto backward = f::Backward(*fwd, {"X", "b"}); ASSERT_TRUE(backward->IsNetOp()); @@ -215,7 +251,7 @@ TEST(Backward, all_input_are_not_need) { ASSERT_TRUE(net->ops_.empty()); } -TEST(Backward, all_output_are_not_need) { +TEST(Backward, op_all_output_are_not_need) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); auto backward = f::Backward(*fwd, {"Out"}); ASSERT_TRUE(backward->IsNetOp()); @@ -223,7 +259,7 @@ TEST(Backward, all_output_are_not_need) { ASSERT_TRUE(net->ops_.empty()); } -TEST(Backward, part_of_output_are_not_need) { +TEST(Backward, op_part_of_output_are_not_need) { auto fwd = f::OpRegistry::CreateOp("many_output_op", {"X"}, {"Y", "Z"}, {}); auto backward = f::Backward(*fwd, {"Z"}); ASSERT_TRUE(backward->IsNetOp()); @@ -248,7 +284,7 @@ TEST(Backward, part_of_output_are_not_need) { d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); } -TEST(Backward, part_of_input_are_not_need) { +TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); ASSERT_TRUE(backward->IsNetOp()); From b1d84194901fb9f5968d4a12de4b97005609a5de Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 27 Jul 2017 16:10:46 +0800 Subject: [PATCH 20/58] rename test --- paddle/framework/backward_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 733c888a26..6f86b62b48 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -306,7 +306,7 @@ TEST(Backward, op_part_of_input_are_not_need) { ASSERT_EQ(grad_mul.Input("Out"), "out"); } -TEST(Backward, intermediate_variable_not_need_in_linear_net) { +TEST(Backward, linear_net_intermediate_variable_has_no_grad) { f::NetOp net; net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, {"out1"}, {})); net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, {"out2"}, {})); From d2583bd4112ffc17d1c1fe1786abdd2d6583d8dd Mon Sep 17 
00:00:00 2001 From: Yu Yang Date: Thu, 27 Jul 2017 16:43:17 +0800 Subject: [PATCH 21/58] InsertOp for NetOp --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/net.h | 9 ++++++++ paddle/framework/net_op_test.cc | 37 ++++++++++++++++----------------- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 7febaaa527..c9a50d8968 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -30,7 +30,7 @@ add_custom_target(framework_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch add_dependencies(framework_py_proto framework_py_proto_init) cc_library(net SRCS net.cc DEPS op_registry) -cc_test(net_op_test SRCS net_op_test.cc DEPS net add_op mul_op sigmoid_op softmax_op fc_op) +cc_test(net_op_test SRCS net_op_test.cc DEPS net) cc_library(backward SRCS backward.cc DEPS net) cc_test(backward_test SRCS backward_test.cc DEPS backward) diff --git a/paddle/framework/net.h b/paddle/framework/net.h index 089c135595..b584dd578f 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -68,9 +68,18 @@ class NetOp : public OperatorBase { */ void AddOp(const std::shared_ptr& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); + PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); ops_.push_back(op); } + void InsertOp(size_t pos, const std::shared_ptr& op) { + PADDLE_ENFORCE(!add_op_done_, + "Cannot InsertOp when this network is sealed"); + PADDLE_ENFORCE(op != nullptr, "Cannot Insert Null op"); + PADDLE_ENFORCE(pos <= ops_.size(), "Out of range"); + ops_.insert(ops_.begin() + pos, op); + } + void CompleteAddOp(bool calculate = true); std::string DebugString() const override; diff --git a/paddle/framework/net_op_test.cc b/paddle/framework/net_op_test.cc index 8048311fe5..4b733e958e 100644 --- a/paddle/framework/net_op_test.cc +++ b/paddle/framework/net_op_test.cc @@ -3,11 +3,6 @@ #include #include -USE_OP(add_two); -USE_OP(mul); -USE_OP(sigmoid); -USE_OP(softmax); - namespace paddle { namespace framework { @@ -26,6 +21,13 @@ class TestOp : public OperatorBase { } }; +class EmptyOp : public OperatorBase { + public: + void InferShape(const std::shared_ptr& scope) const override {} + void Run(const std::shared_ptr& scope, + const platform::DeviceContext& dev_ctx) const override {} +}; + template void AssertSameVectorWithoutOrder(const std::vector& expected, const std::vector& actual) { @@ -72,20 +74,17 @@ TEST(OpKernel, all) { ASSERT_THROW(net->AddOp(op2), paddle::platform::EnforceNotMet); } -//! TODO(yuyang18): Refine Backward Op. 
-// TEST(AddBackwardOp, TestGradOp) { -// auto net = std::make_shared(); -// ASSERT_NE(net, nullptr); -// net->AddOp(framework::OpRegistry::CreateOp("mul", {"X", "Y"}, {"Out"}, {})); -// net->AddOp( -// framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {"Out"}, {})); -// net->AddOp(framework::OpRegistry::CreateOp("add_two", {"X", "Y"}, {""}, -// {})); -// auto grad_ops = AddBackwardOp(net); -// for (auto& op : grad_ops->ops_) { -// op->DebugString(); -// } -//} +TEST(Net, insert_op) { + NetOp net; + auto op1 = std::make_shared(); + op1->inputs_ = {"x", "w1", "b1"}; + op1->outputs_ = {"y"}; + net.AddOp(op1); + net.InsertOp(0, op1); + ASSERT_EQ(2UL, net.ops_.size()); + net.InsertOp(2, op1); + ASSERT_EQ(3UL, net.ops_.size()); +} } // namespace framework } // namespace paddle From b9f2bb3747512f8bd0f5f0a7e024ff329477aabc Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Thu, 27 Jul 2017 16:44:06 +0800 Subject: [PATCH 22/58] "wait add generic" --- paddle/framework/backward.cc | 62 +++++++++++++++++++++--------------- paddle/framework/net.cc | 22 +++++++++++++ paddle/framework/net.h | 9 ++++++ paddle/framework/operator.cc | 6 ++++ paddle/framework/operator.h | 10 ++++++ 5 files changed, 84 insertions(+), 25 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index dae457f858..8538ad9f0a 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -52,6 +52,11 @@ static std::shared_ptr EmptyOp() { static std::shared_ptr BackwardImpl( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { + // struct OpIdentity { + // size_t local_op_id; + // size_t op_output_offset; + // }; + if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { return EmptyOp(); @@ -66,44 +71,51 @@ static std::shared_ptr BackwardImpl( return EmptyOp(); } - auto* net = new NetOp(); + // auto* net = new NetOp(); if (forwardOp.IsNetOp()) { //! TODO(dzh) - std::unordered_map dup_output; - std::unordered_map> dup_output_ops; - // const unsigned uniq_id_local = uniq_id; - int op_id_offset = 0; + std::unordered_map /*op offs et*/> + dup_output_ops; + size_t local_op_id = 0; // Because it is a net op, it can static_cast. 
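  // A sketch, not part of this patch, of what the duplicate-gradient handling
  // started here and finished in the following patches is building toward,
  // assuming the reconstructed type above is
  // std::unordered_map<std::string /*var name*/, std::vector<size_t> /*op offset*/>:
  //  1. visit every sub-op of the forward net, create its backward op, and record
  //     for each gradient variable name the offsets of the backward ops that
  //     write it (dup_output_ops);
  //  2. when a name has more than one writer, e.g. "W@GRAD" for a weight shared
  //     by two mul ops, rename each writer's output to
  //     "W@GRAD@RENAME@<uniq_id>@<i>" via the OperatorBase::Rename added in this
  //     same patch;
  //  3. insert an extra accumulation ("add") op that sums the renamed outputs
  //     back into the original @GRAD name, which is what
  //     TEST(Backward, net_shared_weight) above checks for.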
auto& forwardNet = static_cast(forwardOp); + // travesal subnet/op for (auto& fwd : forwardNet.ops_) { auto bwd = Backward(*fwd, no_grad_names); net->AddOp(bwd); for (size_t i = 0; i < bwd->outputs_.size(); ++i) { - bwd->outputs_[i] += OperatorBase::EMPTY_VAR_NAME(); - if (dup_output.find(bwd->inputs_[i]) == dup_output.end()) { - dup_output[bwd->inputs_[i]] = 1; - dup_output_ops[bwd->inputs_[i]] = std::vector{op_id_offset++}; - } else { - dup_output[bwd->inputs_[i]]++; - dup_output_ops[bwd->inputs_[i]].emplace_back(op_id_offset++); - } + dup_output_ops[bwd->outputs_[i]].emplace_back(local_op_id); } + local_op_id++; } - for (auto dup : dup_output) { - if (dup.second == 1) continue; - auto op_ids = dup_output_ops.at(dup.first); - for (auto& op_id : op_ids) { - auto& op_ptr = net->ops_[op_id]; - for (size_t i = 0; i < op_ptr->inputs_.size(); ++i) { - if (op_ptr->inputs_[i] == dup.first) { - // unique the duplicate name - op_ptr->inputs_[i] += std::to_string(uniq_id++); - // TODO(dzh): need a generic add op here - } - } + // unique the duplicate name + auto uid = uniq_id++; + std::unordered_map insert_postion; + for (auto& dup_output_op : dup_output_ops) { + std::string& name = dup_output_op.first; + auto& dup_op = dup_output_op.second; + if (dup_op.size() == 1) continue; + std::vector dup_outputs; + + for (size_t i = 0; i < dup_op.size(); ++i) { + auto op_offset = dup_op[i]; + net->ops_[op_offset].Rename( + name, + name + "@RENAME@" + std::to_string(uid) + "@" + std::to_string(i)); } + insert_postion[op_offset] = + OpRegistry::CreateOp("Add", {}, {dup_op->inputs_}, {}); + net->AddOp("Add"); + net->AddOp(); + // process shared variable + // while(dup_op.size()) { + // + // AddOp(OpRegistry::CreateOp("generic_add", {dup_outputs}, + // {dup_op->inputs_}, {})); + //} } } else { diff --git a/paddle/framework/net.cc b/paddle/framework/net.cc index 2cd378c6b2..403d96a22d 100644 --- a/paddle/framework/net.cc +++ b/paddle/framework/net.cc @@ -74,5 +74,27 @@ std::string NetOp::DebugString() const { bool NetOp::IsNetOp() const { return true; } +void NetOp::Rename(const std::unordered_map< + std::string, std::vector>& dup_output_ops, + size_t& uniq_id) { + for (auto& op : ops_) { + if (op->isNetOp()) { + op->Rename(dup_output_ops, uniq_id); + } + for (size_t i = 0; i < op->outputs_.size(); ++i) { + std::vector dup_outputs; + if (op->outputs_[i] ==) { + op->outputs_[i] += std::to_string(uniq_id++); + dup_outputs.push_back(op->outputs_[i]); + } + // add duplicate output together. replace with AddOp + if (dup_outputs.size() >= 2) { + AddOp(OpRegistry::CreateOp("generic_add", {dup_outputs}, {op->inputs_}, + {})); + } + } + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/net.h b/paddle/framework/net.h index 089c135595..fa8aaf654c 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -49,6 +49,11 @@ class NetOp : public OperatorBase { } } + /** + * @brief rename duplicated output gradient name in Net + */ + bool Rename(size_t& uniq_id); + /** * @brief Run the network. * @@ -88,5 +93,9 @@ class NetOp : public OperatorBase { } }; +/** + * @brief Identify operator in local Net. 
used in backward + */ + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 1e57e9a20f..c49b2288d6 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -99,5 +99,11 @@ std::string OperatorBase::DebugString() const { return ss.str(); } +void OperatorBase::Rename(const std::string& old_name, + const std::string& new_name) { + std::replace(inputs_.begin(), inputs_.end(), old_name, new_name); + std::replace(outputs_.begin(), outputs_.end(), old_name, new_name); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index c2cd21a080..f98359de12 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include @@ -95,6 +96,9 @@ class OperatorBase { virtual bool IsNetOp() const { return false; } + /// rename inputs outputs name + void Rename(const std::string& old_name, const std::string& new_name); + //! Get a input with argument's name described in `op_proto` const std::string& Input(const std::string& name) const; //! Get a input which has multiple variables. @@ -108,7 +112,13 @@ class OperatorBase { public: std::string type_; + // NOTE: in case of OpGrad, inputs_ contains: + // I (Inputs) + // O (Outputs) + // OG (Output Gradients) std::vector inputs_; + // NOTE: in case of OpGrad, outputs_ contains + // IG (Inputs Gradients) std::vector outputs_; AttributeMap attrs_; // store the arguments' offset described in op_desc. From 7088654a2797132b3feb6042fe723a4bd646a0da Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Thu, 27 Jul 2017 17:10:52 +0800 Subject: [PATCH 23/58] "add duplicate" --- paddle/framework/backward.cc | 36 +++++++++++++++++-------------- paddle/framework/backward_test.cc | 4 ++-- paddle/framework/net.cc | 22 ------------------- paddle/framework/net.h | 5 ----- 4 files changed, 22 insertions(+), 45 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 8538ad9f0a..716e78f342 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" +#include #include "paddle/framework/net.h" #include "paddle/framework/op_registry.h" @@ -71,7 +72,7 @@ static std::shared_ptr BackwardImpl( return EmptyOp(); } - // auto* net = new NetOp(); + auto* net = new NetOp(); if (forwardOp.IsNetOp()) { //! 
TODO(dzh) @@ -93,29 +94,32 @@ static std::shared_ptr BackwardImpl( } // unique the duplicate name auto uid = uniq_id++; - std::unordered_map insert_postion; + // TODO(dzh): more comment + typedef std::pair> Pos; + std::list insert_postion; for (auto& dup_output_op : dup_output_ops) { - std::string& name = dup_output_op.first; + const std::string& name = dup_output_op.first; auto& dup_op = dup_output_op.second; if (dup_op.size() == 1) continue; std::vector dup_outputs; for (size_t i = 0; i < dup_op.size(); ++i) { auto op_offset = dup_op[i]; - net->ops_[op_offset].Rename( - name, - name + "@RENAME@" + std::to_string(uid) + "@" + std::to_string(i)); + dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + + std::to_string(i)); + net->ops_[op_offset]->Rename(name, dup_outputs.back()); } - insert_postion[op_offset] = - OpRegistry::CreateOp("Add", {}, {dup_op->inputs_}, {}); - net->AddOp("Add"); - net->AddOp(); - // process shared variable - // while(dup_op.size()) { - // - // AddOp(OpRegistry::CreateOp("generic_add", {dup_outputs}, - // {dup_op->inputs_}, {})); - //} + insert_postion.push_back( + {dup_op.back(), + OpRegistry::CreateOp( + "Add", {dup_outputs}, {name}, + {{"input_format", + std::vector{0, (int)dup_outputs.size()}}})}); + } + insert_postion.sort( + [](const Pos& l, const Pos& r) { return l.first > r.first; }); + for (auto& pos : insert_postion) { + net->InsertOp(pos.first, pos.second); } } else { diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 6f86b62b48..0666bcc14c 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -215,7 +215,7 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_EQ(all_output.find("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), all_output.end()); - ASSERT_EQ(2, bwd_net->ops_.size()); + ASSERT_EQ(2UL, bwd_net->ops_.size()); ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); ASSERT_EQ(3, first_fc_grad->ops_.size()); @@ -333,4 +333,4 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { ASSERT_EQ(grad_fc.Input("W"), "w3"); ASSERT_EQ(grad_fc.Input("b"), "b3"); ASSERT_EQ(grad_fc.Input("Out"), "out3"); -} \ No newline at end of file +} diff --git a/paddle/framework/net.cc b/paddle/framework/net.cc index 403d96a22d..2cd378c6b2 100644 --- a/paddle/framework/net.cc +++ b/paddle/framework/net.cc @@ -74,27 +74,5 @@ std::string NetOp::DebugString() const { bool NetOp::IsNetOp() const { return true; } -void NetOp::Rename(const std::unordered_map< - std::string, std::vector>& dup_output_ops, - size_t& uniq_id) { - for (auto& op : ops_) { - if (op->isNetOp()) { - op->Rename(dup_output_ops, uniq_id); - } - for (size_t i = 0; i < op->outputs_.size(); ++i) { - std::vector dup_outputs; - if (op->outputs_[i] ==) { - op->outputs_[i] += std::to_string(uniq_id++); - dup_outputs.push_back(op->outputs_[i]); - } - // add duplicate output together. replace with AddOp - if (dup_outputs.size() >= 2) { - AddOp(OpRegistry::CreateOp("generic_add", {dup_outputs}, {op->inputs_}, - {})); - } - } - } -} - } // namespace framework } // namespace paddle diff --git a/paddle/framework/net.h b/paddle/framework/net.h index bc55c8ee05..9c7f0eab73 100644 --- a/paddle/framework/net.h +++ b/paddle/framework/net.h @@ -49,11 +49,6 @@ class NetOp : public OperatorBase { } } - /** - * @brief rename duplicated output gradient name in Net - */ - bool Rename(size_t& uniq_id); - /** * @brief Run the network. 
* From 404cc056b8f0de18ee3633c7c6ba28b773320e2e Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Thu, 27 Jul 2017 17:50:17 +0800 Subject: [PATCH 24/58] "reverse travesal" --- paddle/framework/backward.cc | 7 +++++-- paddle/framework/backward_test.cc | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 716e78f342..2d9efdd511 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -77,14 +77,17 @@ static std::shared_ptr BackwardImpl( if (forwardOp.IsNetOp()) { //! TODO(dzh) std::unordered_map /*op offs et*/> + std::vector /*op offset*/> dup_output_ops; size_t local_op_id = 0; // Because it is a net op, it can static_cast. auto& forwardNet = static_cast(forwardOp); // travesal subnet/op - for (auto& fwd : forwardNet.ops_) { + for (auto it = forwardNet.ops_.end(); it != forwardNet.ops_.begin(); --it) { + auto fwd = *it; + // for (auto& fwd : forwardNet.ops_) { + // auto bwd = Backward(*fwd, no_grad_names); auto bwd = Backward(*fwd, no_grad_names); net->AddOp(bwd); for (size_t i = 0; i < bwd->outputs_.size(); ++i) { diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 0666bcc14c..54acc47599 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -129,12 +129,12 @@ REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker); REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp); REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker); REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp); -REGISTER_OP(fc, f::FcOp, f::FcOpMaker); -REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker); -REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp); REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); REGISTER_OP(add, f::EmptyOp, f::AddOpMaker); REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp); +REGISTER_OP(fc, f::FcOp, f::FcOpMaker); +REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker); +REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp); TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); @@ -218,7 +218,7 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_EQ(2UL, bwd_net->ops_.size()); ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); - ASSERT_EQ(3, first_fc_grad->ops_.size()); + ASSERT_EQ(3UL, first_fc_grad->ops_.size()); ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), first_fc_grad[2].Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); } From 65d2678720a8647f16e284f7890f7e63abfa046d Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 11:28:33 +0800 Subject: [PATCH 25/58] "add simple net test" --- paddle/framework/backward.cc | 2 -- paddle/framework/backward_test.cc | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 2d9efdd511..7e111551d9 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -86,8 +86,6 @@ static std::shared_ptr BackwardImpl( // travesal subnet/op for (auto it = forwardNet.ops_.end(); it != forwardNet.ops_.begin(); --it) { auto fwd = *it; - // for (auto& fwd : forwardNet.ops_) { - // auto bwd = Backward(*fwd, no_grad_names); auto bwd = Backward(*fwd, no_grad_names); net->AddOp(bwd); for (size_t i = 0; i < bwd->outputs_.size(); ++i) { diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 
54acc47599..ada7c70682 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -63,10 +63,10 @@ class FcOp : public NetOp { public: void Init() override { AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, - {Output("before_act")}, {})); + {Output("mul_out")}, {})); auto b_name = Input("b"); if (b_name != EMPTY_VAR_NAME()) { - AddOp(OpRegistry::CreateOp("rowwise_add", {Output("before_act"), b_name}, + AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_out"), b_name}, {Output("before_act")}, {})); } AddOp(OpRegistry::CreateOp("sigmoid", {Output("before_act")}, @@ -82,6 +82,7 @@ class FcOpMaker : public OpProtoAndCheckerMaker { AddInput("X", "x"); AddInput("W", "w"); AddInput("b", "b"); + AddOutput("mul_out", "mul output").SetTemporary(); AddOutput("before_act", "before act").SetTemporary(); AddOutput("Out", ""); AddComment(""); @@ -140,6 +141,7 @@ TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); + LOG(INFO) << gop->DebugString(); ASSERT_EQ(1UL, gop->inputs_.size()); ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->type_); @@ -151,10 +153,18 @@ TEST(Backward, simple_op_grad) { // LOG(INFO) << gop->Output("X" + "@GRAD"); } +TEST(Backward, simple_net_grad) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); + ASSERT_NE(fwd, nullptr); + auto gop = f::Backward(*fwd, {}); + LOG(INFO) << gop->DebugString(); +} + TEST(Backward, net_fc_backward_normal) { std::shared_ptr fwd = f::OpRegistry::CreateOp("fc", {"X", "w", "b"}, {"out"}, {}); ASSERT_NE(fwd, nullptr); + LOG(INFO) << fwd->DebugString(); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); auto net = static_cast(gop.get()); From 8bf0ca0fab37628319d7ecc99f2abb74b5ba2629 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 12:53:52 +0800 Subject: [PATCH 26/58] Fix unittest error --- paddle/framework/backward_test.cc | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 54acc47599..60fbb48688 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -152,8 +152,8 @@ TEST(Backward, simple_op_grad) { } TEST(Backward, net_fc_backward_normal) { - std::shared_ptr fwd = - f::OpRegistry::CreateOp("fc", {"X", "w", "b"}, {"out"}, {}); + std::shared_ptr fwd = f::OpRegistry::CreateOp( + "fc", {"X", "w", "b"}, {"out", "tmp_forward"}, {}); ASSERT_NE(fwd, nullptr); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); @@ -175,7 +175,8 @@ TEST(Backward, net_fc_backward_normal) { TEST(Backward, net_fc_backward_not_have_b) { std::shared_ptr fwd = f::OpRegistry::CreateOp( - "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()}, {"out"}, {}); + "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()}, + {"out", "tmp_forward"}, {}); ASSERT_NE(fwd, nullptr); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); @@ -194,9 +195,10 @@ TEST(Backward, net_fc_backward_not_have_b) { TEST(Backward, net_input_of_network_not_need_grad) { f::NetOp net; - net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, {"hidden0"}, {})); - net.AddOp( - f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, {"hidden1"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, + {"hidden0", "tmp0"}, {})); + 
net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, + {"hidden1", "tmp1"}, {})); net.CompleteAddOp(); auto bwd = Backward(net, {"X"}); // X@GRAD is not need. ASSERT_TRUE(bwd->IsNetOp()); From d0b25ac9b87225a31a2d9468ffb86a0ffe51b4c7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 13:11:54 +0800 Subject: [PATCH 27/58] Fix some unittest error --- paddle/framework/backward.cc | 13 +++++++++---- paddle/framework/backward_test.cc | 30 ++++++++++++++++++++---------- paddle/framework/operator.cc | 4 ++-- paddle/framework/operator.h | 1 + 4 files changed, 32 insertions(+), 16 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 2d9efdd511..52eccfba69 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -72,7 +72,7 @@ static std::shared_ptr BackwardImpl( return EmptyOp(); } - auto* net = new NetOp(); + auto net = std::make_shared(); if (forwardOp.IsNetOp()) { //! TODO(dzh) @@ -84,7 +84,8 @@ static std::shared_ptr BackwardImpl( auto& forwardNet = static_cast(forwardOp); // travesal subnet/op - for (auto it = forwardNet.ops_.end(); it != forwardNet.ops_.begin(); --it) { + for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); + ++it) { auto fwd = *it; // for (auto& fwd : forwardNet.ops_) { // auto bwd = Backward(*fwd, no_grad_names); @@ -115,7 +116,7 @@ static std::shared_ptr BackwardImpl( insert_postion.push_back( {dup_op.back(), OpRegistry::CreateOp( - "Add", {dup_outputs}, {name}, + "add", {dup_outputs}, {name}, {{"input_format", std::vector{0, (int)dup_outputs.size()}}})}); } @@ -142,11 +143,15 @@ static std::shared_ptr BackwardImpl( grad_output = OperatorBase::EMPTY_VAR_NAME(); } } + + if (net->ops_.empty()) { // Current no aux op is added to network + return grad_op; + } net->AddOp(grad_op); } net->CompleteAddOp(); - return std::shared_ptr(net); + return net; } extern std::shared_ptr Backward( diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 60fbb48688..63194e78fc 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -63,14 +63,22 @@ class FcOp : public NetOp { public: void Init() override { AddOp(OpRegistry::CreateOp("mul", {Input("X"), Input("W")}, - {Output("before_act")}, {})); + {Output("mul_result")}, {})); auto b_name = Input("b"); + std::string before_act = "mul_result"; if (b_name != EMPTY_VAR_NAME()) { - AddOp(OpRegistry::CreateOp("rowwise_add", {Output("before_act"), b_name}, - {Output("before_act")}, {})); + AddOp(OpRegistry::CreateOp("rowwise_add", {Output("mul_result"), b_name}, + {Output("add_result")}, {})); + before_act = "add_result"; + } else { + auto out_varname = Output("add_result"); + if (out_varname != EMPTY_VAR_NAME()) { + this->Rename(out_varname, EMPTY_VAR_NAME()); + } } - AddOp(OpRegistry::CreateOp("sigmoid", {Output("before_act")}, - {Output("Out")}, {})); + + AddOp(OpRegistry::CreateOp("sigmoid", {Output(before_act)}, {Output("Out")}, + {})); CompleteAddOp(false); } }; @@ -82,7 +90,8 @@ class FcOpMaker : public OpProtoAndCheckerMaker { AddInput("X", "x"); AddInput("W", "w"); AddInput("b", "b"); - AddOutput("before_act", "before act").SetTemporary(); + AddOutput("mul_result", "").SetTemporary(); + AddOutput("add_result", "").SetTemporary(); AddOutput("Out", ""); AddComment(""); } @@ -153,7 +162,7 @@ TEST(Backward, simple_op_grad) { TEST(Backward, net_fc_backward_normal) { std::shared_ptr fwd = f::OpRegistry::CreateOp( - "fc", {"X", "w", "b"}, {"out", "tmp_forward"}, {}); + "fc", {"X", 
"w", "b"}, {"mul_result", "add_result", "out"}, {}); ASSERT_NE(fwd, nullptr); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); @@ -176,7 +185,7 @@ TEST(Backward, net_fc_backward_normal) { TEST(Backward, net_fc_backward_not_have_b) { std::shared_ptr fwd = f::OpRegistry::CreateOp( "fc", {"X", "w", f::OperatorBase::EMPTY_VAR_NAME()}, - {"out", "tmp_forward"}, {}); + {"mul_result", "add_result", "tmp"}, {}); ASSERT_NE(fwd, nullptr); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); @@ -196,9 +205,9 @@ TEST(Backward, net_fc_backward_not_have_b) { TEST(Backward, net_input_of_network_not_need_grad) { f::NetOp net; net.AddOp(f::OpRegistry::CreateOp("fc", {"X", "W1", "b1"}, - {"hidden0", "tmp0"}, {})); + {"mul_tmp_0", "add_tmp_0", "hidden0"}, {})); net.AddOp(f::OpRegistry::CreateOp("fc", {"hidden0", "W2", "b2"}, - {"hidden1", "tmp1"}, {})); + {"mul_tmp_1", "add_tmp_1", "hidden1"}, {})); net.CompleteAddOp(); auto bwd = Backward(net, {"X"}); // X@GRAD is not need. ASSERT_TRUE(bwd->IsNetOp()); @@ -235,6 +244,7 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); + LOG(INFO) << bwd_net->DebugString(); ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); } diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 3ad9dc2d7b..646269074c 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -52,7 +52,7 @@ std::vector OperatorBase::Inputs(const std::string& name) const { PADDLE_ENFORCE(in_out_idxs_ != nullptr, "IO Idx could not be nullptr"); auto input_format = GetAttr>("input_format"); auto offset = in_out_idxs_->at(name); - PADDLE_ENFORCE(input_format.at((size_t)offset + 1) <= inputs_.size(), + PADDLE_ENFORCE(input_format.at((size_t)offset + 1) <= (int)inputs_.size(), "Input Out Of Range"); return std::vector{ @@ -78,7 +78,7 @@ std::vector OperatorBase::Outputs(const std::string& name) const { PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); auto output_format = GetAttr>("output_format"); auto offset = in_out_idxs_->at(name); - PADDLE_ENFORCE(output_format.at((size_t)offset + 1) <= outputs_.size(), + PADDLE_ENFORCE(output_format.at((size_t)offset + 1) <= (int)outputs_.size(), "Output Out of Range"); return std::vector{ outputs_.begin() + output_format.at(offset), diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index eecf2f8302..358ab841d6 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -101,6 +101,7 @@ class OperatorBase { //! Get a input with argument's name described in `op_proto` const std::string& Input(const std::string& name) const; + //! Get a input which has multiple variables. //! TODO add a vector_view to prevent memory copy. 
std::vector Inputs(const std::string& name) const; From 29d50ad910f6a874bf6055ad0de748765da19692 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 13:55:21 +0800 Subject: [PATCH 28/58] Refine unit-test --- paddle/framework/backward.cc | 10 ++-------- paddle/framework/backward_test.cc | 29 +++++++++++++++-------------- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 52eccfba69..dac57c2e22 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -53,11 +53,6 @@ static std::shared_ptr EmptyOp() { static std::shared_ptr BackwardImpl( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { - // struct OpIdentity { - // size_t local_op_id; - // size_t op_output_offset; - // }; - if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { return EmptyOp(); @@ -87,9 +82,7 @@ static std::shared_ptr BackwardImpl( for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); ++it) { auto fwd = *it; - // for (auto& fwd : forwardNet.ops_) { - // auto bwd = Backward(*fwd, no_grad_names); - auto bwd = Backward(*fwd, no_grad_names); + auto bwd = BackwardImpl(*fwd, no_grad_names, uniq_id); net->AddOp(bwd); for (size_t i = 0; i < bwd->outputs_.size(); ++i) { dup_output_ops[bwd->outputs_[i]].emplace_back(local_op_id); @@ -138,6 +131,7 @@ static std::shared_ptr BackwardImpl( {grad_input}, {})); } } + for (std::string& grad_output : grad_op->outputs_) { if (no_grad_names.count(grad_output)) { grad_output = OperatorBase::EMPTY_VAR_NAME(); diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 63194e78fc..7185872d0a 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -230,8 +230,9 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); ASSERT_EQ(3UL, first_fc_grad->ops_.size()); - ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), - first_fc_grad[2].Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); + ASSERT_EQ( + f::OperatorBase::EMPTY_VAR_NAME(), + first_fc_grad->ops_[2]->Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX())); } TEST(Backward, net_shared_weight) { @@ -244,14 +245,13 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - LOG(INFO) << bwd_net->DebugString(); ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); } TEST(Backward, op_register_grad_not_for_network) { - auto fwd = - f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, - {{"temporary_index", std::vector{1}}}); + auto fwd = f::OpRegistry::CreateOp( + "fc", {"X", "W", "b"}, {"mul_result", "add_result", "Out"}, + {{"temporary_index", std::vector{1}}}); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); } @@ -299,11 +299,9 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); - ASSERT_TRUE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 1UL); + ASSERT_TRUE(!backward->IsNetOp()); - auto &grad_mul = *net->ops_[0]; + auto &grad_mul = *backward; ASSERT_EQ(grad_mul.type_, "mul_grad"); ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); ASSERT_EQ(grad_mul.outputs_.size(), 2UL); @@ -320,10 +318,13 @@ 
TEST(Backward, op_part_of_input_are_not_need) { TEST(Backward, linear_net_intermediate_variable_has_no_grad) { f::NetOp net; - net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, {"out1"}, {})); - net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, {"out2"}, {})); - net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"out3"}, {})); - net.CompleteAddOp(false); + net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, + {"mul_out1", "add_out1", "out1"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, + {"mul_out2", "tmp_out2", "out2"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, + {"mul_out3", "tmp_out3", "out3"}, {})); + net.CompleteAddOp(); auto backward = f::Backward(net, {"out2"}); ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); From 74cd9a7542027a89b0751c2cb5c45bb8f413c52b Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 13:57:31 +0800 Subject: [PATCH 29/58] "fix unittest" --- paddle/framework/backward.cc | 2 +- paddle/framework/backward_test.cc | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1a24d266db..b6c46302b1 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -79,11 +79,11 @@ static std::shared_ptr BackwardImpl( std::unordered_map /*op offset*/> dup_output_ops; - size_t local_op_id = 0; // Because it is a net op, it can static_cast. auto& forwardNet = static_cast(forwardOp); // travesal subnet/op + size_t local_op_id = 0; for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); ++it) { auto fwd = *it; diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 7472a970b9..cb1d402526 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -149,7 +149,6 @@ TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); - LOG(INFO) << gop->DebugString(); ASSERT_EQ(1UL, gop->inputs_.size()); ASSERT_EQ("Out" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->inputs_[0]); ASSERT_EQ("rowwise_add_grad", gop->type_); @@ -161,18 +160,19 @@ TEST(Backward, simple_op_grad) { // LOG(INFO) << gop->Output("X" + "@GRAD"); } -TEST(Backward, simple_net_grad) { - auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); +TEST(Backward, simple_op_not_need_grad) { + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"x", "b"}, {"out"}, {}); ASSERT_NE(fwd, nullptr); - auto gop = f::Backward(*fwd, {}); + auto gop = f::Backward(*fwd, {"x"}); LOG(INFO) << gop->DebugString(); + ASSERT_NE(gop->outputs_.find("x" + f::OperatorBase::GRAD_VAR_SUFFIX()), + gop->outputs_.end()); } TEST(Backward, net_fc_backward_normal) { std::shared_ptr fwd = f::OpRegistry::CreateOp( "fc", {"X", "w", "b"}, {"mul_result", "add_result", "out"}, {}); ASSERT_NE(fwd, nullptr); - LOG(INFO) << fwd->DebugString(); std::shared_ptr gop = f::Backward(*fwd, {}); ASSERT_TRUE(gop->IsNetOp()); auto net = static_cast(gop.get()); From 7087a043187016b84937c76e6f1310fed43f21e3 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 14:09:40 +0800 Subject: [PATCH 30/58] "add unittest" --- paddle/framework/backward_test.cc | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 
cb1d402526..a481cb1b2a 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -13,6 +13,7 @@ limitations under the License. */ #include "paddle/framework/backward.h" + #include #include "paddle/framework/net.h" #include "paddle/framework/op_registry.h" @@ -161,12 +162,23 @@ TEST(Backward, simple_op_grad) { } TEST(Backward, simple_op_not_need_grad) { - auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"x", "b"}, {"out"}, {}); + auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); - auto gop = f::Backward(*fwd, {"x"}); - LOG(INFO) << gop->DebugString(); - ASSERT_NE(gop->outputs_.find("x" + f::OperatorBase::GRAD_VAR_SUFFIX()), + auto gop = f::Backward(*fwd, {"X"}); + LOG(INFO) << "full " << gop->DebugString(); + ASSERT_NE(std::find(gop->outputs_.begin(), gop->outputs_.end(), + "X" + f::OperatorBase::GRAD_VAR_SUFFIX()), gop->outputs_.end()); + auto no_input_gop = f::Backward(*fwd, {"X", "b"}); + LOG(INFO) << "no input gop " << no_input_gop->DebugString(); + ASSERT_NE(no_input_gop, nullptr); + ASSERT_EQ(std::vector{}, no_input_gop->outputs_); + ASSERT_EQ( + std::vector{"Out" + f::OperatorBase::GRAD_VAR_SUFFIX()}, + no_input_gop->inputs_); + // auto no_output_gop = f::Backward(*fwd, {"Out"}); + // ASSERT_EQ(std::vector{"X" + + // f::OperatorBase::GRAD_VAR_SUFFIX(), "b"}) } TEST(Backward, net_fc_backward_normal) { From 658588a6755b8b036d87d6a89928a36dadfb7f00 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 14:28:09 +0800 Subject: [PATCH 31/58] "format test case" --- paddle/framework/backward_test.cc | 52 +++++++++++++++++++------------ 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 69faee9fb7..9886679d30 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -167,15 +167,28 @@ TEST(Backward, simple_op_not_need_grad) { auto gop = f::Backward(*fwd, {"X"}); LOG(INFO) << "full " << gop->DebugString(); ASSERT_NE(std::find(gop->outputs_.begin(), gop->outputs_.end(), - "X" + f::OperatorBase::GRAD_VAR_SUFFIX()), + std::string("X") + f::OperatorBase::GRAD_VAR_SUFFIX()), gop->outputs_.end()); + auto no_input_gop = f::Backward(*fwd, {"X", "b"}); - LOG(INFO) << "no input gop " << no_input_gop->DebugString(); + LOG(INFO) << "no input gop " << gop->DebugString(); ASSERT_NE(no_input_gop, nullptr); - ASSERT_EQ(std::vector{}, no_input_gop->outputs_); + + typedef std::vector Vec; + auto vector_equal = [](const Vec &l, const Vec &r) { + return l.size() == r.size(); + for (size_t i = 0; i < l.size(); ++i) { + if (l[i] != r[i]) return false; + } + return true; + }; + ASSERT_EQ(vector_equal(std::vector{}, no_input_gop->outputs_), + true); ASSERT_EQ( - std::vector{"Out" + f::OperatorBase::GRAD_VAR_SUFFIX()}, - no_input_gop->inputs_); + vector_equal( + std::vector{"Out" + f::OperatorBase::GRAD_VAR_SUFFIX()}, + no_input_gop->inputs_), + true); // auto no_output_gop = f::Backward(*fwd, {"Out"}); // ASSERT_EQ(std::vector{"X" + // f::OperatorBase::GRAD_VAR_SUFFIX(), "b"}) @@ -251,9 +264,8 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); ASSERT_EQ(3UL, first_fc_grad->ops_.size()); - ASSERT_EQ( - f::OperatorBase::EMPTY_VAR_NAME(), - first_fc_grad->ops_[2]->Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX())); + ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), + first_fc_grad[2].Output("X" + 
f::OperatorBase::GRAD_VAR_SUFFIX())); } TEST(Backward, net_shared_weight) { @@ -266,13 +278,14 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); + LOG(INFO) << bwd_net->DebugString(); ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); } TEST(Backward, op_register_grad_not_for_network) { - auto fwd = f::OpRegistry::CreateOp( - "fc", {"X", "W", "b"}, {"mul_result", "add_result", "Out"}, - {{"temporary_index", std::vector{1}}}); + auto fwd = + f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, + {{"temporary_index", std::vector{1}}}); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); } @@ -320,9 +333,11 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); - ASSERT_TRUE(!backward->IsNetOp()); + ASSERT_TRUE(backward->IsNetOp()); + auto net = static_cast(backward.get()); + ASSERT_EQ(net->ops_.size(), 1UL); - auto &grad_mul = *backward; + auto &grad_mul = *net->ops_[0]; ASSERT_EQ(grad_mul.type_, "mul_grad"); ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); ASSERT_EQ(grad_mul.outputs_.size(), 2UL); @@ -339,13 +354,10 @@ TEST(Backward, op_part_of_input_are_not_need) { TEST(Backward, linear_net_intermediate_variable_has_no_grad) { f::NetOp net; - net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, - {"mul_out1", "add_out1", "out1"}, {})); - net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, - {"mul_out2", "tmp_out2", "out2"}, {})); - net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, - {"mul_out3", "tmp_out3", "out3"}, {})); - net.CompleteAddOp(); + net.AddOp(f::OpRegistry::CreateOp("fc", {"x1", "w1", "b1"}, {"out1"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out1", "w2", "b2"}, {"out2"}, {})); + net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"out3"}, {})); + net.CompleteAddOp(false); auto backward = f::Backward(net, {"out2"}); ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); From d6e0368285dd1f264fd78cec9f2832be84b772cd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 14:38:38 +0800 Subject: [PATCH 32/58] Add comment in backward.cc --- paddle/framework/backward.cc | 61 +++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index dac57c2e22..25ebcefa03 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -50,50 +50,72 @@ static std::shared_ptr EmptyOp() { return net_op; } +/** + * @brief Backward an operator, implementation + * @param forwardOp the forward operator + * @param no_grad_names variable names not calculate for gradient. Like X@GRAD + * is not needed. + * @param uniq_id a unique index used inside BackwardImpl, it will be shared + * through recursive invoke. + * @return The backward operator. For simple situation, it is a simple operator. + * For complex situation, it is a NetOp. + * + * See Backward.h for details + */ static std::shared_ptr BackwardImpl( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { + /** + * If all input gradients of forwarding operator do not need to calculate, + * just return an EmptyOp. Not return null ptr because EmptyOp does not take + * too much time for calculation, but it is useful for simplifying logic. 
+ */ if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { return EmptyOp(); } + /** + * All output gradients of forwarding operator do not need to calculate. Then + * all input gradients cannot be computed at all, and we put them into + * `no_grad_names` set. Return an EmptyOp. + */ if (AllInSet(forwardOp.outputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { for (auto& name : forwardOp.inputs_) { - // Mark all input is not need + /// Mark all input is not need no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); } return EmptyOp(); } + //! Returned gradient network auto net = std::make_shared(); if (forwardOp.IsNetOp()) { - //! TODO(dzh) - std::unordered_map /*op offset*/> - dup_output_ops; - size_t local_op_id = 0; - // Because it is a net op, it can static_cast. + /// Because forwardOp is a net op, it can static_cast. auto& forwardNet = static_cast(forwardOp); - // travesal subnet/op + //! Map from output gradient variable name to operator's indices in backward + //! net. That operator generates that variable. + std::unordered_map> dup_output_ops; + + size_t local_op_id = 0; + /// reversely travel forwardNet for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); - ++it) { + ++it, ++local_op_id) { auto fwd = *it; auto bwd = BackwardImpl(*fwd, no_grad_names, uniq_id); net->AddOp(bwd); - for (size_t i = 0; i < bwd->outputs_.size(); ++i) { - dup_output_ops[bwd->outputs_[i]].emplace_back(local_op_id); + for (auto& out : bwd->outputs_) { + dup_output_ops[out].emplace_back(local_op_id); } - local_op_id++; } - // unique the duplicate name + /// Get unique ID for this method. auto uid = uniq_id++; // TODO(dzh): more comment - typedef std::pair> Pos; - std::list insert_postion; + using Pos = std::pair>; + std::list insert_position; for (auto& dup_output_op : dup_output_ops) { const std::string& name = dup_output_op.first; auto& dup_op = dup_output_op.second; @@ -106,16 +128,18 @@ static std::shared_ptr BackwardImpl( std::to_string(i)); net->ops_[op_offset]->Rename(name, dup_outputs.back()); } - insert_postion.push_back( + insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp( "add", {dup_outputs}, {name}, {{"input_format", std::vector{0, (int)dup_outputs.size()}}})}); } - insert_postion.sort( + + insert_position.sort( [](const Pos& l, const Pos& r) { return l.first > r.first; }); - for (auto& pos : insert_postion) { + + for (auto& pos : insert_position) { net->InsertOp(pos.first, pos.second); } @@ -148,6 +172,7 @@ static std::shared_ptr BackwardImpl( return net; } +//! 
See header for comments extern std::shared_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars) { From 71bd439b45f36d4de5e0c06dfc013859d97684e3 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 28 Jul 2017 15:25:07 +0800 Subject: [PATCH 33/58] Addjust Backward.linear_net_intermediate_variable_has_no_grad --- paddle/framework/backward_test.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 7185872d0a..ae85e6201b 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -325,14 +325,14 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"mul_out3", "tmp_out3", "out3"}, {})); net.CompleteAddOp(); - auto backward = f::Backward(net, {"out2"}); + auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 1UL); auto &grad_fc = *bwd_net->ops_[0]; ASSERT_EQ(grad_fc.type_, "fc_grad"); - ASSERT_EQ(grad_fc.inputs_.size(), 3UL + 1UL + 1UL); + ASSERT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); ASSERT_EQ(grad_fc.outputs_.size(), 3UL); ASSERT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), f::OperatorBase::EMPTY_VAR_NAME()); @@ -340,10 +340,17 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { "w3" + f::OperatorBase::GRAD_VAR_SUFFIX()); ASSERT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()), "b3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Input("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "mul_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Input("add_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "tmp_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); ASSERT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), "out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + ASSERT_EQ(grad_fc.Input("X"), "out2"); ASSERT_EQ(grad_fc.Input("W"), "w3"); ASSERT_EQ(grad_fc.Input("b"), "b3"); + ASSERT_EQ(grad_fc.Input("mul_result"), "mul_out3"); + ASSERT_EQ(grad_fc.Input("add_result"), "tmp_out3"); ASSERT_EQ(grad_fc.Input("Out"), "out3"); } From 0da5cce24f69946df2a163f6f8e48ea6879f4df4 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 15:40:41 +0800 Subject: [PATCH 34/58] "fix test case" --- paddle/framework/backward_test.cc | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 9886679d30..f3d2c8d54b 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -165,33 +165,12 @@ TEST(Backward, simple_op_not_need_grad) { auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::Backward(*fwd, {"X"}); - LOG(INFO) << "full " << gop->DebugString(); - ASSERT_NE(std::find(gop->outputs_.begin(), gop->outputs_.end(), - std::string("X") + f::OperatorBase::GRAD_VAR_SUFFIX()), + ASSERT_EQ(std::find(gop->outputs_.begin(), gop->outputs_.end(), + "X" + f::OperatorBase::GRAD_VAR_SUFFIX()), gop->outputs_.end()); auto no_input_gop = f::Backward(*fwd, {"X", "b"}); - LOG(INFO) << "no input gop " << gop->DebugString(); ASSERT_NE(no_input_gop, nullptr); - - typedef std::vector Vec; - auto vector_equal = [](const Vec &l, const Vec &r) { - return l.size() == r.size(); - for (size_t i = 
0; i < l.size(); ++i) { - if (l[i] != r[i]) return false; - } - return true; - }; - ASSERT_EQ(vector_equal(std::vector{}, no_input_gop->outputs_), - true); - ASSERT_EQ( - vector_equal( - std::vector{"Out" + f::OperatorBase::GRAD_VAR_SUFFIX()}, - no_input_gop->inputs_), - true); - // auto no_output_gop = f::Backward(*fwd, {"Out"}); - // ASSERT_EQ(std::vector{"X" + - // f::OperatorBase::GRAD_VAR_SUFFIX(), "b"}) } TEST(Backward, net_fc_backward_normal) { @@ -251,6 +230,8 @@ TEST(Backward, net_input_of_network_not_need_grad) { bwd_net->outputs_.begin(), bwd_net->outputs_.end()); all_output.erase(f::OperatorBase::EMPTY_VAR_NAME()); + LOG(INFO) << bwd_net->DebugString(); + LOG(INFO) << bwd_net->ops_.size(); for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { ASSERT_NE(all_output.find(out + f::OperatorBase::GRAD_VAR_SUFFIX()), all_output.end()); @@ -264,6 +245,7 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); ASSERT_EQ(3UL, first_fc_grad->ops_.size()); + LOG(INFO) << first_fc_grad->DebugString(); ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), first_fc_grad[2].Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); } @@ -333,7 +315,7 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); - ASSERT_TRUE(backward->IsNetOp()); + ASSERT_False(backward->IsNetOp()); auto net = static_cast(backward.get()); ASSERT_EQ(net->ops_.size(), 1UL); From 52054af714c40cf93c72f675c7e0457260ff902c Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 15:44:27 +0800 Subject: [PATCH 35/58] "fix typo" --- paddle/framework/backward_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index f3d2c8d54b..371ce3e745 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -315,7 +315,7 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); - ASSERT_False(backward->IsNetOp()); + ASSERT_FALSE(backward->IsNetOp()); auto net = static_cast(backward.get()); ASSERT_EQ(net->ops_.size(), 1UL); From 302046aa511587dec818d88767c64fecbeaa4363 Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Fri, 28 Jul 2017 16:05:58 +0800 Subject: [PATCH 36/58] "fix return net error" --- paddle/framework/backward_test.cc | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 13242ead24..ee8a47d5e7 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -60,6 +60,16 @@ class SigmoidOpMaker : public OpProtoAndCheckerMaker { } }; +class NoGradOpMaker : public OpProtoAndCheckerMaker { + public: + NoGradOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "X input"); + AddOutput("Y", "Y output"); + AddComment("NoGradOp, same input output. 
no Grad"); + } +}; + class FcOp : public NetOp { public: void Init() override { @@ -139,6 +149,7 @@ REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker); REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp); REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker); REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp); +REGISTER_OP(nograd, f::EmptyOp, f::NoGradOpMaker); REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); REGISTER_OP(add, f::EmptyOp, f::AddOpMaker); REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp); @@ -266,9 +277,11 @@ TEST(Backward, net_shared_weight) { } TEST(Backward, op_register_grad_not_for_network) { - auto fwd = - f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, - {{"temporary_index", std::vector{1}}}); + // auto fwd = + // f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, + // {{"temporary_index", std::vector{1}}}); + + auto fwd = f::OpRegistry::CreateOp("nograd", {"x"}, {"x"}, {}); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); } @@ -316,11 +329,7 @@ TEST(Backward, op_part_of_output_are_not_need) { TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); - ASSERT_FALSE(backward->IsNetOp()); - auto net = static_cast(backward.get()); - ASSERT_EQ(net->ops_.size(), 1UL); - - auto &grad_mul = *net->ops_[0]; + auto &grad_mul = *backward; ASSERT_EQ(grad_mul.type_, "mul_grad"); ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); ASSERT_EQ(grad_mul.outputs_.size(), 2UL); From 1de465b54d29987e6fc381274c8a60df99994540 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 28 Jul 2017 16:08:08 +0800 Subject: [PATCH 37/58] Change some `ASSERT_EQ` to `EXPECT_EQ` --- paddle/framework/backward_test.cc | 36 +++++++++++++++++-------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 13242ead24..ffdadd709f 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -313,6 +313,7 @@ TEST(Backward, op_part_of_output_are_not_need) { d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); } +/* TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); @@ -334,6 +335,7 @@ TEST(Backward, op_part_of_input_are_not_need) { ASSERT_EQ(grad_mul.Input("B"), "b"); ASSERT_EQ(grad_mul.Input("Out"), "out"); } +*/ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { f::NetOp net; @@ -343,33 +345,35 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { {"mul_out2", "tmp_out2", "out2"}, {})); net.AddOp(f::OpRegistry::CreateOp("fc", {"out2", "w3", "b3"}, {"mul_out3", "tmp_out3", "out3"}, {})); - net.CompleteAddOp(false); + net.CompleteAddOp(); auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 3UL); + EXPECT_EQ(bwd_net->ops_[0]->type_, "fc_grad"); + EXPECT_EQ(bwd_net->ops_[1]->type_, ""); + EXPECT_EQ(bwd_net->ops_[2]->type_, ""); auto &grad_fc = *bwd_net->ops_[0]; - ASSERT_EQ(grad_fc.type_, "fc_grad"); - ASSERT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); - ASSERT_EQ(grad_fc.outputs_.size(), 3UL); - ASSERT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); + EXPECT_EQ(grad_fc.outputs_.size(), 3UL); + EXPECT_EQ(grad_fc.Output("X" 
+ f::OperatorBase::GRAD_VAR_SUFFIX()), f::OperatorBase::EMPTY_VAR_NAME()); - ASSERT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()), "w3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - ASSERT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()), "b3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - ASSERT_EQ(grad_fc.Input("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.Input("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), "mul_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - ASSERT_EQ(grad_fc.Input("add_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.Input("add_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), "tmp_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - ASSERT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), + EXPECT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), "out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - ASSERT_EQ(grad_fc.Input("X"), "out2"); - ASSERT_EQ(grad_fc.Input("W"), "w3"); - ASSERT_EQ(grad_fc.Input("b"), "b3"); - ASSERT_EQ(grad_fc.Input("mul_result"), "mul_out3"); - ASSERT_EQ(grad_fc.Input("add_result"), "tmp_out3"); - ASSERT_EQ(grad_fc.Input("Out"), "out3"); + EXPECT_EQ(grad_fc.Input("X"), "out2"); + EXPECT_EQ(grad_fc.Input("W"), "w3"); + EXPECT_EQ(grad_fc.Input("b"), "b3"); + EXPECT_EQ(grad_fc.Input("mul_result"), "mul_out3"); + EXPECT_EQ(grad_fc.Input("add_result"), "tmp_out3"); + EXPECT_EQ(grad_fc.Input("Out"), "out3"); } From 39cd39e0e1b40e32dcc7066d8662af1260b7a0cc Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 28 Jul 2017 16:23:05 +0800 Subject: [PATCH 38/58] Update test --- paddle/framework/backward_test.cc | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 9830e4c092..36d6cbb5ee 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -154,7 +154,6 @@ REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); REGISTER_OP(add, f::EmptyOp, f::AddOpMaker); REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp); REGISTER_OP(fc, f::FcOp, f::FcOpMaker); -REGISTER_GRADIENT_OP(fc, fc_grad, f::EmptyOp); REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker); REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp); @@ -326,7 +325,6 @@ TEST(Backward, op_part_of_output_are_not_need) { d_many_out.Output("x" + f::OperatorBase::GRAD_VAR_SUFFIX())); } -/* TEST(Backward, op_part_of_input_are_not_need) { auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {}); auto backward = f::Backward(*fwd, {"a"}); @@ -344,7 +342,6 @@ TEST(Backward, op_part_of_input_are_not_need) { ASSERT_EQ(grad_mul.Input("B"), "b"); ASSERT_EQ(grad_mul.Input("Out"), "out"); } -*/ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { f::NetOp net; @@ -359,13 +356,19 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 3UL); - EXPECT_EQ(bwd_net->ops_[0]->type_, "fc_grad"); + EXPECT_EQ(bwd_net->ops_[0]->type_, ""); EXPECT_EQ(bwd_net->ops_[1]->type_, ""); EXPECT_EQ(bwd_net->ops_[2]->type_, ""); auto &grad_fc = *bwd_net->ops_[0]; EXPECT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); EXPECT_EQ(grad_fc.outputs_.size(), 3UL); + + EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL); + 
EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL); + EXPECT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), f::OperatorBase::EMPTY_VAR_NAME()); EXPECT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()), From be528683f61c5787f9045b72ac8f2f57151da3fa Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 17:14:34 +0800 Subject: [PATCH 39/58] Fix net_input_of_network_not_need_grad --- paddle/framework/backward_test.cc | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 36d6cbb5ee..420cc65fef 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -241,8 +241,6 @@ TEST(Backward, net_input_of_network_not_need_grad) { bwd_net->outputs_.begin(), bwd_net->outputs_.end()); all_output.erase(f::OperatorBase::EMPTY_VAR_NAME()); - LOG(INFO) << bwd_net->DebugString(); - LOG(INFO) << bwd_net->ops_.size(); for (auto &out : {"W1", "b1", "hidden0", "W2", "b2"}) { ASSERT_NE(all_output.find(out + f::OperatorBase::GRAD_VAR_SUFFIX()), all_output.end()); @@ -256,9 +254,9 @@ TEST(Backward, net_input_of_network_not_need_grad) { ASSERT_TRUE(bwd_net->ops_[1]->IsNetOp()); auto first_fc_grad = static_cast(bwd_net->ops_[1].get()); ASSERT_EQ(3UL, first_fc_grad->ops_.size()); - LOG(INFO) << first_fc_grad->DebugString(); - ASSERT_EQ(f::OperatorBase::EMPTY_VAR_NAME(), - first_fc_grad[2].Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); + ASSERT_EQ( + f::OperatorBase::EMPTY_VAR_NAME(), + first_fc_grad->ops_[2]->Output("A" + f::OperatorBase::GRAD_VAR_SUFFIX())); } TEST(Backward, net_shared_weight) { @@ -271,7 +269,6 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - LOG(INFO) << bwd_net->DebugString(); ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); } From a2e2cd776d407025b1eefec3530d740a2317f301 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 28 Jul 2017 17:34:56 +0800 Subject: [PATCH 40/58] Fix bug of TEST Backwar.linear_net_intermediate_variable_has_no_grad --- paddle/framework/backward_test.cc | 38 +++++++++++++++++-------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 36d6cbb5ee..caf5eec57d 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -356,36 +356,40 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 3UL); - EXPECT_EQ(bwd_net->ops_[0]->type_, ""); - EXPECT_EQ(bwd_net->ops_[1]->type_, ""); - EXPECT_EQ(bwd_net->ops_[2]->type_, ""); auto &grad_fc = *bwd_net->ops_[0]; - EXPECT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); - EXPECT_EQ(grad_fc.outputs_.size(), 3UL); - + EXPECT_EQ(grad_fc.inputs_.size(), + 3UL /* external input number */ + + 1UL /* external output number*/ + + 1UL /* number of gradient of external output*/ + - 1UL /*ignoreGradient varable number*/ + + 2U /* internal variable number*/); + EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/ + + 2UL /* input number of rowwise_add */ + + 1UL /* input number of sigmod */); + + std::cout << std::endl; EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL); EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL); 
EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL); EXPECT_EQ(bwd_net->ops_[2]->outputs_.size(), 0UL); - EXPECT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), - f::OperatorBase::EMPTY_VAR_NAME()); + /* + EXPECT_EQ(grad_fc.Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX()), + f::OperatorBase::EMPTY_VAR_NAME()); EXPECT_EQ(grad_fc.Output("W" + f::OperatorBase::GRAD_VAR_SUFFIX()), - "w3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + "w3" + f::OperatorBase::GRAD_VAR_SUFFIX()); EXPECT_EQ(grad_fc.Output("b" + f::OperatorBase::GRAD_VAR_SUFFIX()), - "b3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - EXPECT_EQ(grad_fc.Input("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), - "mul_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - EXPECT_EQ(grad_fc.Input("add_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), - "tmp_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); - EXPECT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), - "out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + "b3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + EXPECT_EQ(grad_fc.Output("mul_result" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "mul_out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); + EXPECT_EQ(grad_fc.Input("Out" + f::OperatorBase::GRAD_VAR_SUFFIX()), + "out3" + f::OperatorBase::GRAD_VAR_SUFFIX()); EXPECT_EQ(grad_fc.Input("X"), "out2"); EXPECT_EQ(grad_fc.Input("W"), "w3"); - EXPECT_EQ(grad_fc.Input("b"), "b3"); EXPECT_EQ(grad_fc.Input("mul_result"), "mul_out3"); EXPECT_EQ(grad_fc.Input("add_result"), "tmp_out3"); EXPECT_EQ(grad_fc.Input("Out"), "out3"); + */ } From 42e2fa57bd3fcbcecd09a3828f66cc8e6c788028 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Jul 2017 17:38:08 +0800 Subject: [PATCH 41/58] Fix unittest --- paddle/framework/backward.cc | 5 +++-- paddle/framework/backward_test.cc | 15 ++++++--------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 25ebcefa03..472a671e47 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -46,6 +46,7 @@ static std::vector InSetIdx( static std::shared_ptr EmptyOp() { auto net_op = std::make_shared(); + net_op->type_ = "@EMPTY_OP@"; net_op->CompleteAddOp(); return net_op; } @@ -140,7 +141,7 @@ static std::shared_ptr BackwardImpl( [](const Pos& l, const Pos& r) { return l.first > r.first; }); for (auto& pos : insert_position) { - net->InsertOp(pos.first, pos.second); + net->InsertOp(pos.first + 1, pos.second); } } else { @@ -167,7 +168,7 @@ static std::shared_ptr BackwardImpl( } net->AddOp(grad_op); } - + net->type_ = "@GENERATED_BACKWARD@"; net->CompleteAddOp(); return net; } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 420cc65fef..00c11563af 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -269,15 +269,14 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("add_grad", bwd_net->ops_[2]->type_); + ASSERT_EQ("add", bwd_net->ops_[2]->type_); } TEST(Backward, op_register_grad_not_for_network) { - // auto fwd = - // f::OpRegistry::CreateOp("fc", {"X", "W", "b"}, {"Out", "tmp_out"}, - // {{"temporary_index", std::vector{1}}}); + auto fwd = f::OpRegistry::CreateOp( + "fc", {"X", "W", "b"}, {"mul_out", "add_out", "out1"}, + {{"temporary_index", std::vector{0, 1}}}); - auto fwd = f::OpRegistry::CreateOp("nograd", {"x"}, {"x"}, {}); ASSERT_THROW(f::OpRegistry::CreateGradOp(*fwd), EnforceNotMet); } @@ -350,13 
+349,11 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { {"mul_out3", "tmp_out3", "out3"}, {})); net.CompleteAddOp(); auto backward = f::Backward(net, {"mul_out2", "tmp_out2", "out2"}); + LOG(INFO) << backward->DebugString(); + ASSERT_TRUE(backward->IsNetOp()); auto bwd_net = static_cast(backward.get()); ASSERT_EQ(bwd_net->ops_.size(), 3UL); - EXPECT_EQ(bwd_net->ops_[0]->type_, ""); - EXPECT_EQ(bwd_net->ops_[1]->type_, ""); - EXPECT_EQ(bwd_net->ops_[2]->type_, ""); - auto &grad_fc = *bwd_net->ops_[0]; EXPECT_EQ(grad_fc.inputs_.size(), 3UL + 3UL + 3UL); EXPECT_EQ(grad_fc.outputs_.size(), 3UL); From 213fdad1e8e265199419cfbe6850ed3705e57853 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 28 Jul 2017 18:07:35 +0800 Subject: [PATCH 42/58] adjust format --- paddle/framework/backward_test.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 3e7a7b4f23..ec55661e79 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -362,8 +362,6 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { EXPECT_EQ(grad_fc.outputs_.size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add */ + 1UL /* input number of sigmod */); - - std::cout << std::endl; EXPECT_EQ(bwd_net->ops_[1]->inputs_.size(), 0UL); EXPECT_EQ(bwd_net->ops_[1]->outputs_.size(), 0UL); EXPECT_EQ(bwd_net->ops_[2]->inputs_.size(), 0UL); From f5636dab803799a3e75bcedad2d1427e6a4359ed Mon Sep 17 00:00:00 2001 From: dongzhihong Date: Sun, 30 Jul 2017 15:00:32 +0800 Subject: [PATCH 43/58] design doc --- paddle/framework/backward.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 paddle/framework/backward.md diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md new file mode 100644 index 0000000000..87c910ec83 --- /dev/null +++ b/paddle/framework/backward.md @@ -0,0 +1 @@ +## Backward Policy Design From b9767aead40edd893052a1d89c56dd9b0b790b61 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 31 Jul 2017 14:44:26 +0800 Subject: [PATCH 44/58] Add SliceConfig. --- proto/ModelConfig.proto | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto index 83f72c137b..d7f00fba47 100644 --- a/proto/ModelConfig.proto +++ b/proto/ModelConfig.proto @@ -198,6 +198,11 @@ message RowConvConfig { required uint32 context_length = 1; } +message SliceConfig { + required uint32 start = 1; + required uint32 end = 2; +} + message ProjectionConfig { required string type = 1; required string name = 2; @@ -218,6 +223,9 @@ message ProjectionConfig { // For pool optional PoolConfig pool_conf = 12; + + // For slice + repeated SliceConfig slice = 13; } message OperatorConfig { From dc9f31b32b5b4cfd1aa50493d41b13759c2c19fd Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Mon, 31 Jul 2017 21:22:08 +0800 Subject: [PATCH 45/58] Add SliceProjection and slice_projection. 
--- proto/ModelConfig.proto | 3 +- python/paddle/trainer/config_parser.py | 29 ++++++++++++++ .../paddle/trainer_config_helpers/layers.py | 40 +++++++++++++++++++ 3 files changed, 71 insertions(+), 1 deletion(-) diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto index d7f00fba47..3bee5b572a 100644 --- a/proto/ModelConfig.proto +++ b/proto/ModelConfig.proto @@ -225,7 +225,8 @@ message ProjectionConfig { optional PoolConfig pool_conf = 12; // For slice - repeated SliceConfig slice = 13; + // Each slice output is the input[start, end) + repeated SliceConfig slices = 13; } message OperatorConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 5477158ecb..f71fefffb5 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -565,6 +565,35 @@ class IdentityOffsetProjection(Projection): return [] +@config_class +class SliceProjection(Projection): + type = 'slice' + + def __init__(self, input_layer_name, slices, **xargs): + super(SliceProjection, self).__init__(input_layer_name, **xargs) + input = g_layer_map[input_layer_name] + if input.type in ["exconv", "cudnn_conv"]: + # the slice operator is for the channel dimension + assert input.num_filters is not None + channels = input.num_filters + image_size = input.size / channels + assert slices[len(slices) - 1][1] <= channels + for i in xrange(len(slices)): + slice = self.proj_conf.slices.add() + slice.start = slices[i][0] * image_size + slice.end = slices[i][1] * image_size + self.size += slice.end - slice.start + else: + config_assert(False, + 'Currently the input should be convolution layer') + + def calc_parameter_size(self, input_size, output_size): + return 0 + + def calc_parameter_dims(self, input_size, output_size): + return [] + + # DotMulProjection performs element-wise multiplication with weight @config_class class DotMulProjection(Projection): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 14f072fc55..d1c2cecc6c 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -128,6 +128,7 @@ __all__ = [ 'prelu_layer', 'gated_unit_layer', 'crop_layer', + 'slice_projection', ] @@ -536,6 +537,45 @@ def identity_projection(input, offset=None, size=None): return proj +def slice_projection(input, slices): + """ + slice_projection can get multiple outputs, and each output is a slice + of the input. + + .. math:: + output[i] = input.slice(slices[i]) + + The example usage is: + + .. code-block:: python + + proj = slice_projection(input=layer, slices=[(0, 10), (20, 30)]) + + Note that slice_projection should not have any parameter. + + :param input: Input Layer. + :type input: LayerOutput + :param slices: An array of slice parameters. + Each slice contains the start and end offsets based + on the input. + :type offset: pair of int + :return: A SliceProjection object + :rtype: SliceProjection + """ + assert len(slices) >= 1 + start = 0 + for i in xrange(len(slices)): + assert len(slices[i]) == 2 + # The start position of the next slice needs to be greater than + # or equal to the end position of the previous slice. 
+        assert slices[i][0] >= start
+        assert slices[i][1] >= slices[i][0]
+        start = slices[i][1]
+    proj = SliceProjection(input_layer_name=input.name, slices=slices)
+    proj.origin = input
+    return proj
+
+
 @wrap_param_attr_default()
 def scaling_projection(input, param_attr=None):
     """

From bd14660ca28d87282b9acf5caa5a5a16899a166e Mon Sep 17 00:00:00 2001
From: dongzhihong
Date: Mon, 31 Jul 2017 23:51:44 +0800
Subject: [PATCH 46/58] "add part of design doc"

---
 paddle/framework/backward.md                  | 39 +++++++++++++++++
 paddle/framework/images/duplicate_op.graffle  | Bin 0 -> 2432 bytes
 paddle/framework/images/duplicate_op.png      | Bin 0 -> 21893 bytes
 paddle/framework/images/duplicate_op2.graffle | Bin 0 -> 2460 bytes
 paddle/framework/images/duplicate_op2.png     | Bin 0 -> 28971 bytes
 5 files changed, 38 insertions(+), 1 deletion(-)
 create mode 100644 paddle/framework/images/duplicate_op.graffle
 create mode 100644 paddle/framework/images/duplicate_op.png
 create mode 100644 paddle/framework/images/duplicate_op2.graffle
 create mode 100644 paddle/framework/images/duplicate_op2.png

diff --git a/paddle/framework/backward.md b/paddle/framework/backward.md
index 87c910ec83..74c001b06a 100644
--- a/paddle/framework/backward.md
+++ b/paddle/framework/backward.md
@@ -1 +1,38 @@
-## Backward Policy Design
+## Operator/expression's Backward
+
+### Motivation
+
+In a neural network, the backpropagation algorithm follows the chain rule, so we need to compose the fundamental gradient operators/expressions together according to the chain rule. Every forward network needs a backward network to construct the full computation lineage; the operator/expression's Backward feature generates the backward pass with respect to the forward pass.
+
+### Implementation: gradient operator registry
+
+|                        | forward operator | backward operator                |
+| ---------------------- | ---------------- | -------------------------------- |
+| **Operator::inputs_**  | Inputs           | Inputs, Outputs, OutputGradients |
+| **Operator::outputs_** | Outputs          | InputGradients                   |
+
+Inputs/Outputs are the inputs/outputs of the operator; InputGradients/OutputGradients are the gradients with respect to the forward operator. The forward operator and its backward operator are isomorphic, and each records the names it needs in its member attributes.
+
+We keep the available gradient operators in a global hash map, following the philosophy of a minimal core that makes every operator a pluggable unit. Each gradient is itself an operator and needs to register itself.
+
+grad_op_builder(fengjiayi)
+
+### Implementation: backward network
+
+Given a forward network, we generate the backward network. We only care about the gradients—`OutputGradients`, `InputGradients`.
+
+1. bla bla bla (yuyang)
+
+2. NetOp
+
+   When the input forward network is a NetOp, it needs to call the backward functions of its sub NetOps/Operators recursively and make sure they are all done. During this process, we need to collect the `OutputGradients` names.
+
+   Variables are shared within the same scope; as a result, operators with duplicated `OutputGradients` names would overwrite each other's variables.
+
+   ![](./images/duplicate_op.png)
+
+   Sharing a variable between operators, or using the same input variable in multiple operators, leads to a duplicated gradient variable. As the demo above shows, we need to rename the gradient names recursively and add a generic add operator to combine them.
+
+![](./images/duplicate_op2.png)
+
+   Then collect the sub-graph's OutputGradients/InputGradients as the NetOp's and return it.
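+
+A minimal sketch of how the registry above is exercised (the macros and `OpRegistry` calls mirror the ones used in `backward_test.cc`; the `EmptyOp`/`MulOpMaker` classes and the surrounding test scaffolding are omitted, so read this as an illustration rather than a complete program):
+
+```cpp
+// Registered once at namespace scope: the forward op and, under a separate
+// key, the op that computes its gradient.
+REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker);
+REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp);
+
+// Inside a function or test body: CreateGradOp looks up the gradient entry
+// registered for the forward type "mul".
+auto fwd = f::OpRegistry::CreateOp("mul", {"a", "b"}, {"out"}, {});
+auto gop = f::OpRegistry::CreateGradOp(*fwd);
+// gop->inputs_  : a, b, out, "out" + GRAD_VAR_SUFFIX()   (Inputs, Outputs, OutputGradients)
+// gop->outputs_ : "a" + GRAD_VAR_SUFFIX(), "b" + GRAD_VAR_SUFFIX()   (InputGradients)
+```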
diff --git a/paddle/framework/images/duplicate_op.graffle b/paddle/framework/images/duplicate_op.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..5979f792e252f028a615729215529c2be42d9165
GIT binary patch
[binary Graffle source omitted: 2432 bytes]

diff --git a/paddle/framework/images/duplicate_op.png b/paddle/framework/images/duplicate_op.png
new file mode 100644
index 0000000000000000000000000000000000000000..f299c5d37f260a1bb0daec886f0a4ee1c1f31c92
GIT binary patch
[binary PNG image omitted: 21893 bytes — the figure referenced as ./images/duplicate_op.png in backward.md]

diff --git a/paddle/framework/images/duplicate_op2.graffle b/paddle/framework/images/duplicate_op2.graffle
new file mode 100644
GIT binary patch
[binary Graffle source omitted: 2460 bytes]

diff --git a/paddle/framework/images/duplicate_op2.png b/paddle/framework/images/duplicate_op2.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5588015d1450fd8c1bda3580680d884494868bb
GIT binary patch
[binary PNG image omitted: 28971 bytes — the figure referenced as ./images/duplicate_op2.png in backward.md]
zyC8%u3N;GPa})3B@-Rn^PmbrOI_zI}Y`+B0dmbjLO$VMA%|!>vA;Ctq3IY%)AOgX{ z(UrK0SMKe!QP_3x8JFI-gLw(C5^!F2v50CkjT`OE+tC?Bt4bB`72T8hzm3y24ul*4d(k*Yl*?vvCJw`dbt13(x2dHDNzbwU!nK|`MYKNk7Qj?viZu!dO9*Wy4O zs(qvM6Rf-$CVwtg-|Is;J8&nx_Mj?409J})>&@9*{W}cz@W$S;Z0QGS1Ee%Gk(K49 z9Xcd0KjiTegwrksM*NmLC!C!mQ8&W>5yT~Hyu~t<9%uOeJ)z_+)6++0?Q%h?6>SD2 zHQcDB#TDFY`oMW$^+d-DYw$R>pnBfwd1~|c@ng=mZ@LxquFhQ#vY%YQySwR?1(}%g zfE@={T7TEWqa(hm^rRWGabp=d`L09k%m(d`X98n!f4UYZ03l|~Lqpm7xN-9^ocMXA zZGrBnV+vm)SpD_Cbpa`^4IfJe3N>o>B)*i^c~Z&UzKBiCu3ct4FUHmKRzZ_7aYzY~ zCEsGeDRx#HY3`-pSKrn9&qbaoee2~Cy4zG>z6#_IvYwFof3`Lh9rl$8`c^|&Si9q; zCS&@g?r77X%8lTwrsJHJ1XP|7HQ!o~h~l`ag`5pcp04pWi=T|JDB#tPZ=?a zosZ=g)Hh$cHIgBqxdjSa_8EARB_!P3^~VR|SV`a#4wo?<0GkZh1bU~_N-`>T`eDVh z(0(!nxE?TpD|=jh8(+F%%MQ!C3`QXK@xOFkBrK`aD4IwCPTqH)2#el8{G<||VgGX> zobVpam|SyQ8E3*Z2m2o(tqS{+>OCU#A0v}&4apHm|A{r=C>}mnlH+OL*X*?gGxV#l zkB~J|>0J3O;mNhK#Wgsp9%fZt6q;)QlD#TtHwC}~UB&aKPp(tZE@G;|L?wTdF~0f8 zdjApe-EMRmr;efb@#n(3V3)ziSh`43J{-p{Zb3{wfIn=AE zt15vi#{BMHrS2-*j*u*QWj?Iu6Ag|e&D@5tArR0Lc@~g_^J`o3EM{EY(S+Bs%4*!O zM)u9}aO@zwhO7@2J_}=0>C^l!+8Iin0*n_`gV(eO(r1vP%SuVAdl^^6D4IdW^XWZt zK~&*baGV+M_d4USrD~v&Ws_(Wu%zYa^y3$7S_lE?V-_oZp^nzTJiTIKtataPH_~Du zPV?XDT^%|fFu)J*X{K{C!OxXszBmY0ru&$_R|Q#MaO;K?Tp$y%N}=0~6TcI}3RhTq z$DsMHnapz#VO5vfO6v%n}?s;j>yTu|t-p8Jn2Bs}m62%T+L;P#8sHWJ0qH>v) zj1e|;Bs+L{*Y?}IpI+m&cy8ot|776G9x0AUs?`=7 zx}SMK2$Kr)_cp{N`Lx)-rYBASk6SNtHX2(5r&B&jJY&jD60P;VxS^;Vac zQP)s!P^mw(0z+Y96$k#&Joj|jUuf1UP+d^+BaSthn7#In+!X(Ta@7PyLxA%TW|UD4 zik~59LJUt=wGQVWcy9BwFfXLT%JYHK6eV$K1tNohj{14Xf2L4R%Z(y3dv0SWJ5kN*kgvD6$Eh-2qZBh$xK4u6Rt2SGQ!E%z0CU-A!E%V|8>nT6^$h$R7H z;_dOUWKRU;`Q>)m)y2i{CWC|bQc@jpWi6L`sbvpz$_^I34+hjTJ7aSXG%}j z5C8J-g9B|2=;avli3KM$K7SK*<7)(i@xYtegKYDr5l6qi8B@$&BGo23Z9=_^hPs0rg`?` zb?5z$pL7?B6q)?Z%eo{c#nRUJeUQA%p9@(nLf=X2YI(iLrR5JHfFo0;x!;mkzyr zmHo`Wu8IUz93Z(~nfMUbh-1nLNxuQ8(wvG%+2Y7y_#znqN;T8^!{+;x=kH1Q#WK&e z*9!qcY~g%^S6b2%YLyr_67uV?SZNO0Ka%^e$f5-e^Jn_g#nzrGWdByA)b0xs4>i7j z&-0&kkR{NqvQUYlc^*Ds7iI@Ii6)J%C0v|i`LTpcW)c6jJP-|f|M+GQrjU~VrmvbQ z?pBJFKhBF1?iEjJAOXl4UJ1Lv8FLc&uf`E%6aC|dh#69nM&0dc*`HeE6ciM6%mBn# zF5PlE$A+{IGq0b4^!e6@U@aYk13sxIBJbY;zkpt9+^k<}STALpF6k#h{|xY$ukLo| zRe~cNX&)S{C=h6j_eymV5_Cz01hF}4EGQbsu$sNdA1A`e(*OkaU7)|6c0YqLQF#o#FH2|txexv5N@_+$=hp>9iDlu-g z&V+rUQBcv)+1c=(WKF(Yrj_)i`eqJrvG_bf_A@TvGO61t7#eOq2GCO=XE@+^W4K2z z7AU*K>|m01fxMZTlCn~l`c*Efwz`@~;?JRqOpb_56z#NpmK~~duAP_(Bg7v9#h^z& zp^#e%?*&;DJ>le9T3UoSiAJryJ|K{xv4$j`@)*yJjNt9B?(_od9z`Weu)o?dCC}mr zYBVsQE5p+uj+-LLu@L7D#a)rdqNR;zS?WnSn)40=j~`DGJVub`#}s1T4>BJ}Uirdf z&AE-6M0_q!I`##JGbK%0ygjm4fL`U+61DqoePC;VzGAKQt_8dC^n%&uB6Ts`^SU$w z{g!cQr#I8gjs@c1c0d671jd6~L(420klMQh=?_Ea=@a4n?yHt5A{+RUa5yz@&_2lm zseKeRHZ8F$w$%0`Q2RBfPn(=^|>YwehD8C!}sy=*>E^48q8IQ{qyG! 
z@aV}c6<1Weyu2py8(O0%Zr6QR_e`pTAXs3M&?BQT@#vn0qkk_d5*HJL8I9A}hH5c5 zSNs`B=5O?+a69O>VHzA91QKqTmyPTG0x(r@Kxyt;D@||gf|ea_b$)*Q#>?>X5*Wn# zcN?zM*_%F8&uv)66h|d)8Pz}eLsx!6j)~FGF&{>>-QHX=a8pTm|7hy$nP5yO&FN-h zhBCnQOIL4wi0yeUkHLk*nPUA{o)UESMjuit6av#!E)!t;VOrTjNu_g z9lGE31#g0D$@M5TuOq6Z+|gdyG(O#zc*t*mkn7N`UnI>}1QuJjXIW+MmnBO-bxXNQ z#|kV0(4-BiXL9KixoxOsJgbi(43nFMUoxp>JWJ+%Bg+)Et;-+-p(1}0A*6zaeuOUq zcH^^`Ys+_zK ztLH`p?6N39@D$}b1p00M*oQNV7MQ zHpB}~(=i>5KkG@z0->`HcYDq z8^#MnjrnnB4vC~(gHVlV*9XZl9i3k_Y?zYTEBL7<1|?^pgHm-U#7y6@k=H#B&n zqa?x5z<{8sXwrj;!Nlt|XscoHIG@}Bdl;D7|L6Z8RDF%D7IMhRMkm0-lOwQ-Apq|P z8ZLlN7H$N^k$r%Lvek-z6nsu(~!R5UEiUR=bOa6eZ&RrMy^TBsLZy(Upa@P4(9i=X`2Sw*|7H#S|LX;L@qnhgNztfq_73?4 OLj|a+OsSOVhyMXT=uKV# literal 0 HcmV?d00001 From 0cca0fcf1152b6d2a7c9068934aa2505491ca856 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 11:35:17 +0800 Subject: [PATCH 47/58] Add SliceProjection. --- paddle/gserver/layers/SliceProjection.cpp | 96 +++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 paddle/gserver/layers/SliceProjection.cpp diff --git a/paddle/gserver/layers/SliceProjection.cpp b/paddle/gserver/layers/SliceProjection.cpp new file mode 100644 index 0000000000..a361d19bde --- /dev/null +++ b/paddle/gserver/layers/SliceProjection.cpp @@ -0,0 +1,96 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "Projection.h" + +namespace paddle { + +/** + * SliceProjection can slice the input value into multiple parts, + * and then select some of them to merge into a new output. + * + * First, calculate the slices that need to be merged into the output. + * slices = input.slices().for_output() + * + * Second, merge each slice into the output. + * for(auto slice: slices) { + * out.addAtOffset(slice, offset); + * } + * + * Input slices as output: s0, s1, ...: + * ----------------------- + * |///| |//////| | + * |/s0| |//s1//| | + * |///| |//////| | + * ----------------------- + * Output, merge s0, s1, ... into one output: + * ---------------- + * |///|//////| | + * |/s0|//s1//|...| + * |///|//////| | + * ---------------- + * + * The config file api is slice_projection. + */ +class SliceProjection : public Projection { +public: + SliceProjection(const ProjectionConfig& config, + const ParameterPtr& parameter, + bool useGpu); + virtual void forward(); + virtual void backward(const UpdateCallback& callback); + +protected: + std::vector> slices_; +}; + +REGISTER_PROJECTION(slice, SliceProjection); + +/** + * Constructed function. + * @note SliceProjection should not have any parameter. 
+ */ +SliceProjection::SliceProjection(const ProjectionConfig& config, + const ParameterPtr& parameter, + bool useGpu) + : Projection(config, parameter, useGpu) { + CHECK(!parameter) << "'slice' projection should not have any parameter"; + + slices_.reserve(config.slices_size()); + for (const auto& slice : config.slices()) { + slices_.push_back(std::make_pair(slice.start(), slice.end())); + } +} + +void SliceProjection::forward() { + size_t offset = 0; + for (auto& slice : slices_) { + auto slice_out = in_->value->subColMatrix(slice.first, slice.second); + out_->value->addAtOffset(*slice_out, offset); + offset += slice_out->getWidth(); + } +} + +void SliceProjection::backward(const UpdateCallback& callback) { + if (in_->grad) { + size_t offset = 0; + for (auto& slice : slices_) { + auto slice_out = in_->grad->subColMatrix(slice.first, slice.second); + slice_out->addAtOffset(*out_->grad, config_.offset()); + offset += slice_out->getWidth(); + } + } +} + +} // namespace paddle From 4b1bc6815e81b8370ce373b58fb4db1affdec029 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 11:45:49 +0800 Subject: [PATCH 48/58] Fix comments of slice_projection, and add unit test of SliceProjection. --- paddle/gserver/tests/test_LayerGrad.cpp | 20 +++++++++++++++++++ .../paddle/trainer_config_helpers/layers.py | 6 +++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 0975c3bc95..00ca4982e9 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -152,6 +152,26 @@ TEST(Projection, identity) { } } +TEST(Projection, slice) { + ProjectionConfig conf; + conf.set_type("slice"); + conf.set_input_size(100); + SliceConfig& slice1 = *conf.add_slices(); + slice1.set_start(10); + slice1.set_end(20); + SliceConfig& slice2 = *conf.add_slices(); + slice2.set_start(50); + slice2.set_end(70); + conf.set_output_size(30); + for (auto useGpu : {false, true}) { + testProjectionGrad(conf, + INPUT_DATA, + /* parameterSize */ 0, + /* batchSize */ 100, + useGpu); + } +} + TEST(Projection, scaling) { ProjectionConfig conf; conf.set_type("scaling"); diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index d1c2cecc6c..2045233522 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -539,11 +539,11 @@ def identity_projection(input, offset=None, size=None): def slice_projection(input, slices): """ - slice_projection can get multiple outputs, and each output is a slice - of the input. + slice_projection can slice the input value into multiple parts, + and then select some of them to merge into a new output. .. math:: - output[i] = input.slice(slices[i]) + output = [input.slices()] The example usage is: From d456c286093ca6c74f7c6d02b67d3339877f564a Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 13:50:05 +0800 Subject: [PATCH 49/58] Fix some bug. 
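SliceProjection::backward() added the output gradient at the fixed
config_.offset() for every slice instead of advancing a running offset, so the
gradient columns were misaligned for every slice after the first; the change
below mirrors the offset bookkeeping already used in forward(). A minimal NumPy
sketch of that bookkeeping (function and variable names here are illustrative,
not the real Matrix API):

    import numpy as np

    def slice_forward(value, slices):
        # concatenate the selected column ranges of the input side by side
        return np.concatenate([value[:, s:e] for s, e in slices], axis=1)

    def slice_backward(out_grad, slices, input_width):
        in_grad = np.zeros((out_grad.shape[0], input_width))
        offset = 0
        for s, e in slices:
            width = e - s
            # route this slice's block of the output gradient back to its columns
            in_grad[:, s:e] += out_grad[:, offset:offset + width]
            offset += width  # advance per slice, as the corrected backward() does
        return in_grad

With input_size 100 and slices [(10, 20), (50, 70)], as in the new
TEST(Projection, slice), forward yields a width-30 output and backward routes
the two gradient blocks back to columns [10, 20) and [50, 70).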
--- paddle/gserver/layers/SliceProjection.cpp | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/gserver/layers/SliceProjection.cpp b/paddle/gserver/layers/SliceProjection.cpp index a361d19bde..267dd6154b 100644 --- a/paddle/gserver/layers/SliceProjection.cpp +++ b/paddle/gserver/layers/SliceProjection.cpp @@ -87,7 +87,7 @@ void SliceProjection::backward(const UpdateCallback& callback) { size_t offset = 0; for (auto& slice : slices_) { auto slice_out = in_->grad->subColMatrix(slice.first, slice.second); - slice_out->addAtOffset(*out_->grad, config_.offset()); + slice_out->addAtOffset(*out_->grad, offset); offset += slice_out->getWidth(); } } diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 00ca4982e9..8ce8600c67 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -167,7 +167,7 @@ TEST(Projection, slice) { testProjectionGrad(conf, INPUT_DATA, /* parameterSize */ 0, - /* batchSize */ 100, + /* batchSize */ 10, useGpu); } } From bfdd9a1ced319bce91a0b4fb197cf323a9a6dfe8 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 13:56:05 +0800 Subject: [PATCH 50/58] Add a unit test for concat and slice network in test_NetworkCompare.cpp --- paddle/gserver/tests/concat_slice_a.conf | 41 ++++++++++++++++++++ paddle/gserver/tests/concat_slice_b.conf | 41 ++++++++++++++++++++ paddle/gserver/tests/test_NetworkCompare.cpp | 6 +++ 3 files changed, 88 insertions(+) create mode 100644 paddle/gserver/tests/concat_slice_a.conf create mode 100644 paddle/gserver/tests/concat_slice_b.conf diff --git a/paddle/gserver/tests/concat_slice_a.conf b/paddle/gserver/tests/concat_slice_a.conf new file mode 100644 index 0000000000..dccf911089 --- /dev/null +++ b/paddle/gserver/tests/concat_slice_a.conf @@ -0,0 +1,41 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from paddle.trainer_config_helpers import * + +settings(batch_size=10) + +data = data_layer(name ="input", size=8*16*16) + +conv1 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) +conv2 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) + +proj1 = slice_projection(input=conv1, slices=[(0, 4), (4, 12)]) + +proj2 = slice_projection(input=conv2, slices=[(1, 5), (5, 15)]) + +concat = concat_layer(input=[proj1, proj2]) + +outputs(concat) + diff --git a/paddle/gserver/tests/concat_slice_b.conf b/paddle/gserver/tests/concat_slice_b.conf new file mode 100644 index 0000000000..29686ef281 --- /dev/null +++ b/paddle/gserver/tests/concat_slice_b.conf @@ -0,0 +1,41 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from paddle.trainer_config_helpers import * + +settings(batch_size=10) + +data = data_layer(name ="input", size=8*16*16) + +conv1 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) +conv2 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) + +proj1 = slice_projection(input=conv1, slices=[(0, 12)]) + +proj2 = slice_projection(input=conv2, slices=[(1, 15)]) + +concat = concat_layer(input=[proj1, proj2]) + +outputs(concat) + diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index 40e662b22b..f930c72fde 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -237,6 +237,12 @@ TEST(Compare, concat_table) { compareNetwork(config_file_a, config_file_b); } +TEST(Compare, concat_slice) { + std::string config_file_a = "./gserver/tests/concat_slice_a.conf"; + std::string config_file_b = "./gserver/tests/concat_slice_b.conf"; + compareNetwork(config_file_a, config_file_b); +} + #ifndef PADDLE_ONLY_CPU TEST(Compare, img_pool) { std::string config_file_a = "./gserver/tests/img_pool_a.conf"; From 46988517f2b352919caaa0cf6879f800d42ed8ae Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 14:08:43 +0800 Subject: [PATCH 51/58] Fix a small bug. --- python/paddle/trainer_config_helpers/layers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 2045233522..965874ddf6 100755 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -558,7 +558,7 @@ def slice_projection(input, slices): :param slices: An array of slice parameters. Each slice contains the start and end offsets based on the input. 
- :type offset: pair of int + :type slices: pair of int :return: A SliceProjection object :rtype: SliceProjection """ From e2fd2bd0d1edea7b4c06bf93d192bede4f22e3ad Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Aug 2017 15:40:49 +0800 Subject: [PATCH 52/58] Follow comments and merge develop --- paddle/framework/backward.cc | 92 +++++++++++------------- paddle/framework/backward.h | 8 +-- paddle/framework/backward_test.cc | 1 - paddle/operators/fill_zeros_like_op.cc | 18 ++--- paddle/operators/fill_zeros_like_op.h | 4 +- paddle/operators/recurrent_network_op.cc | 11 +-- 6 files changed, 60 insertions(+), 74 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 472a671e47..c8fda8e260 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -31,88 +31,74 @@ static bool AllInSet(const std::vector& names, return true; } -static std::vector InSetIdx( - const std::vector& names, const std::string& suffix, - const std::unordered_set& set) { - std::vector ret_val; - ret_val.reserve(names.size()); - for (size_t i = 0; i < names.size(); ++i) { - if (set.find(names[i] + suffix) != set.end()) { - ret_val.push_back(i); - } - } - return ret_val; -} - -static std::shared_ptr EmptyOp() { +static std::shared_ptr NOP() { auto net_op = std::make_shared(); - net_op->type_ = "@EMPTY_OP@"; + net_op->type_ = "@NOP@"; net_op->CompleteAddOp(); return net_op; } -/** - * @brief Backward an operator, implementation - * @param forwardOp the forward operator - * @param no_grad_names variable names not calculate for gradient. Like X@GRAD - * is not needed. - * @param uniq_id a unique index used inside BackwardImpl, it will be shared - * through recursive invoke. - * @return The backward operator. For simple situation, it is a simple operator. - * For complex situation, it is a NetOp. - * - * See Backward.h for details - */ -static std::shared_ptr BackwardImpl( +// Get backward operator from a forward operator, recursively implementation. +// +// no_grad_names the gradient variable names without gradient calculating. +// +// uniq_id is a unique index used inside recursively calling BackwardRecursive. +// use `uid = uniq_id++;` to get the unique index, and pass `uniq_id` through +// recursive calling. +// +// returns The backward operator. For simple situation, it is a simple +// operator. For complex situation, it is a NetOp. +// +// See Backward.h for details +static std::shared_ptr BackwardRecursive( + const OperatorBase& forwardOp, + std::unordered_set& no_grad_names, size_t& uniq_id); +std::shared_ptr BackwardRecursive( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { - /** - * If all input gradients of forwarding operator do not need to calculate, - * just return an EmptyOp. Not return null ptr because EmptyOp does not take - * too much time for calculation, but it is useful for simplifying logic. - */ + // If all input gradients of forwarding operator do not need to calculate, + // just return an NOP. Not return null ptr because NOP does not take + // too much time for calculation, but it is useful for simplifying logic. if (AllInSet(forwardOp.inputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { - return EmptyOp(); + return NOP(); } - /** - * All output gradients of forwarding operator do not need to calculate. Then - * all input gradients cannot be computed at all, and we put them into - * `no_grad_names` set. Return an EmptyOp. - */ + // All output gradients of forwarding operator do not need to calculate. 
Then + // all input gradients cannot be computed at all, and we put them into + // `no_grad_names` set. Return an NOP. if (AllInSet(forwardOp.outputs_, OperatorBase::GRAD_VAR_SUFFIX(), no_grad_names)) { for (auto& name : forwardOp.inputs_) { - /// Mark all input is not need + // Mark all input is not need no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); } - return EmptyOp(); + return NOP(); } - //! Returned gradient network + // Returned gradient network auto net = std::make_shared(); if (forwardOp.IsNetOp()) { - /// Because forwardOp is a net op, it can static_cast. + // Because forwardOp is a net op, it can static_cast. auto& forwardNet = static_cast(forwardOp); - //! Map from output gradient variable name to operator's indices in backward - //! net. That operator generates that variable. + // Map from output gradient variable name to operator's indices in backward + // net. That operator generates that variable. std::unordered_map> dup_output_ops; size_t local_op_id = 0; - /// reversely travel forwardNet + // reversely travel forwardNet for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); ++it, ++local_op_id) { auto fwd = *it; - auto bwd = BackwardImpl(*fwd, no_grad_names, uniq_id); + auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id); net->AddOp(bwd); for (auto& out : bwd->outputs_) { dup_output_ops[out].emplace_back(local_op_id); } } - /// Get unique ID for this method. + // Get unique ID for this method. auto uid = uniq_id++; // TODO(dzh): more comment using Pos = std::pair>; @@ -145,13 +131,15 @@ static std::shared_ptr BackwardImpl( } } else { - //! TODO(fjy) std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); for (std::string& grad_input : grad_op->inputs_) { if (no_grad_names.count(grad_input)) { std::string prefix = grad_input.substr( 0, grad_input.size() - OperatorBase::GRAD_VAR_SUFFIX().size()); grad_input = prefix + OperatorBase::ZERO_VAR_SUFFIX(); + + // If part of input gradient of that operator is not calculated, fill + // zero variables to that input gradient. net->AddOp(OpRegistry::CreateOp("fill_zeros_like", {prefix}, {grad_input}, {})); } @@ -173,8 +161,8 @@ static std::shared_ptr BackwardImpl( return net; } -//! See header for comments -extern std::shared_ptr Backward( +// See header for comments +std::shared_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars) { std::unordered_set no_grad_names; @@ -184,7 +172,7 @@ extern std::shared_ptr Backward( no_grad_names.insert(name + OperatorBase::GRAD_VAR_SUFFIX()); } size_t uid = 0; - return BackwardImpl(forwardOp, no_grad_names, uid); + return BackwardRecursive(forwardOp, no_grad_names, uid); } } // namespace framework } // namespace paddle diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index d711c7bbb6..c181919dc1 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -18,12 +18,8 @@ namespace paddle { namespace framework { -/** - * @brief - * @param forwardOp - * @param no_grad_vars ignored input name of forward - * @return - */ +// Create the backward operator from a forward operator. +// TODO(yuyang18): Add more API reference comment. 
extern std::shared_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index ec55661e79..cb14ef9573 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -169,7 +169,6 @@ TEST(Backward, simple_op_grad) { ASSERT_EQ("X" + f::OperatorBase::GRAD_VAR_SUFFIX(), gop->Output("X" + f::OperatorBase::GRAD_VAR_SUFFIX())); - // LOG(INFO) << gop->Output("X" + "@GRAD"); } TEST(Backward, simple_op_not_need_grad) { diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index d641bc4ada..79a0e3d7e9 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -21,15 +21,17 @@ namespace operators { class FillZerosLikeOp : public framework::OperatorWithKernel { protected: - void InferShape( - const std::vector &inputs, - const std::vector &outputs) const override { - PADDLE_ENFORCE(inputs.size() == 1, + void InferShape(const framework::InferShapeContext &ctx) const override { + PADDLE_ENFORCE(ctx.InputSize() == 1UL, "Input size of FillZerosLikeOp must be one."); - PADDLE_ENFORCE(outputs.size() == 1, "Output size of AddOp must be one."); - PADDLE_ENFORCE(inputs[0] != nullptr && outputs[0] != nullptr, - "Outputs of FillZerosLikeOp must all be set."); - outputs[0]->Resize(inputs[0]->dims()); + PADDLE_ENFORCE(ctx.OutputSize() == 1UL, + "Output size of AddOp must be one."); + PADDLE_ENFORCE(ctx.InputVar(0) != nullptr, + "Input of FillZerosLikeOp must be set."); + PADDLE_ENFORCE(ctx.OutputVar(0) != nullptr, + "Output of FillZerosLikeOp must be set."); + ctx.Output(0)->Resize( + ctx.Input(0)->dims()); } }; diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index ca44a201f7..05272964ab 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -23,8 +23,8 @@ namespace operators { template class FillZerosLikeKernel : public framework::OpKernel { public: - void Compute(const framework::KernelContext& context) const override { - auto* output = context.Output(0)->GetMutable(); + void Compute(const framework::ExecutionContext& context) const override { + auto* output = context.Output(0); output->mutable_data(context.GetPlace()); framework::EigenVector::Flatten(*output).setZero(); } diff --git a/paddle/operators/recurrent_network_op.cc b/paddle/operators/recurrent_network_op.cc index 1a101d6ddf..4ad3133184 100644 --- a/paddle/operators/recurrent_network_op.cc +++ b/paddle/operators/recurrent_network_op.cc @@ -312,13 +312,14 @@ public: : OpProtoAndCheckerMaker(proto, op_checker) { const auto& name = RecurrentOp::kArgName; // inputs and outputs stored in proto - AddInputs(name.inlinks, - "the input that need to be segmented for each step."); - AddInputs(name.boot_memories, "variables to initialize memories."); + AddInput(name.inlinks, "the input that need to be segmented for each step.") + .SetMultiple(); + AddInput(name.boot_memories, "variables to initialize memories.") + .SetMultiple(); AddInput(name.step_net, "network shared by all steps."); - AddOutputs(name.outlinks, - "the output that need to concated for all steps."); + AddOutput(name.outlinks, "the output that need to concated for all steps.") + .SetMultiple(); AddOutput(name.step_scopes, "step scopes"); // Attributes stored in AttributeMap From 3b58574ba9fb5d007a0c82d87ea631a18698f169 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 1 Aug 2017 16:18:36 +0800 
Subject: [PATCH 53/58] add check in OPeratorContext Input/Output --- paddle/framework/operator.cc | 6 ++++-- paddle/framework/operator.h | 40 +++++++++++++++++++++++++----------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 9bf60b7b11..c08c6bba59 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -52,7 +52,8 @@ std::vector OperatorBase::Inputs(const std::string& name) const { PADDLE_ENFORCE(in_out_idxs_ != nullptr, "IO Idx could not be nullptr"); auto input_format = GetAttr>("input_format"); auto offset = in_out_idxs_->at(name); - PADDLE_ENFORCE(input_format.at((size_t)offset + 1) <= inputs_.size(), + PADDLE_ENFORCE(input_format.at(static_cast(offset) + 1) <= + static_cast(inputs_.size()), "Input Out Of Range"); return std::vector{ @@ -78,7 +79,8 @@ std::vector OperatorBase::Outputs(const std::string& name) const { PADDLE_ENFORCE(in_out_idxs_ != nullptr, "InOut Indice could not be nullptr"); auto output_format = GetAttr>("output_format"); auto offset = in_out_idxs_->at(name); - PADDLE_ENFORCE(output_format.at((size_t)offset + 1) <= outputs_.size(), + PADDLE_ENFORCE(output_format.at(static_cast(offset) + 1) <= + static_cast(outputs_.size()), "Output Out of Range"); return std::vector{ outputs_.begin() + output_format.at(offset), diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index ef1521b83b..ff518265a4 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -108,11 +108,11 @@ class OperatorContext { size_t OutputSize() const { return op_.outputs_.size(); } - const Variable* InputVar(const size_t& index) const { + const Variable* InputVar(const size_t index) const { return scope_->GetVariable(op_.inputs_.at(index)); } - Variable* OutputVar(const size_t& index) const { + Variable* OutputVar(const size_t index) const { return scope_->GetVariable(op_.outputs_.at(index)); } @@ -146,23 +146,31 @@ class OperatorContext { } template - const T* Input(const size_t& index) const { - return &(InputVar(index)->Get()); + const T* Input(const size_t index) const { + auto var = InputVar(index); + PADDLE_ENFORCE(var != nullptr, "Input(%d) should not be nullptr", index); + return &var->Get(); } template - T* Output(const size_t& index) const { - return OutputVar(index)->GetMutable(); + T* Output(const size_t index) const { + auto var = OutputVar(index); + PADDLE_ENFORCE(var != nullptr, "Output(%d) should not be nullptr", index); + return var->GetMutable(); } template const T* Input(const std::string& name) const { - return &(InputVar(name)->Get()); + auto var = InputVar(name); + PADDLE_ENFORCE(var != nullptr, "Input(%s) should not be nullptr", name); + return &var->Get(); } template T* Output(const std::string& name) const { - return OutputVar(name)->GetMutable(); + auto var = OutputVar(name); + PADDLE_ENFORCE(var != nullptr, "Output(%s) should not be nullptr", name); + return var->GetMutable(); } template @@ -171,8 +179,12 @@ class OperatorContext { std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), - [this](const std::string& name) { - return &scope_->GetVariable(name)->Get(); + [&](const std::string& sub_name) { + auto var = scope_->GetVariable(sub_name); + PADDLE_ENFORCE(var != nullptr, + "MultiInput(%s:%s) should not be nullptr", + name, sub_name); + return &var->Get(); }); return res; } @@ -183,8 +195,12 @@ class OperatorContext { std::vector res; res.reserve(names.size()); 
std::transform(names.begin(), names.end(), std::back_inserter(res), - [this](const std::string& name) { - return scope_->GetVariable(name)->GetMutable(); + [&](const std::string& sub_name) { + auto var = scope_->GetVariable(sub_name); + PADDLE_ENFORCE(var != nullptr, + "MultiOutput(%s:%s) should not be nullptr", + name, sub_name); + return var->GetMutable(); }); return res; } From 737ea05491fb0f0a95160db294f172d04e1ac925 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Aug 2017 16:57:02 +0800 Subject: [PATCH 54/58] Use static_cast, Fix unittest --- paddle/framework/backward.cc | 2 +- paddle/framework/backward_test.cc | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c8fda8e260..0da11b91a7 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -120,7 +120,7 @@ std::shared_ptr BackwardRecursive( OpRegistry::CreateOp( "add", {dup_outputs}, {name}, {{"input_format", - std::vector{0, (int)dup_outputs.size()}}})}); + std::vector{0, static_cast(dup_outputs.size())}}})}); } insert_position.sort( diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index cb14ef9573..2e892f12fb 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -181,6 +181,8 @@ TEST(Backward, simple_op_not_need_grad) { auto no_input_gop = f::Backward(*fwd, {"X", "b"}); ASSERT_NE(no_input_gop, nullptr); + ASSERT_TRUE(no_input_gop->IsNetOp()); + ASSERT_EQ(0UL, std::static_pointer_cast(no_input_gop)->ops_.size()); } TEST(Backward, net_fc_backward_normal) { From 8395af06defb6e3820832ea6bcb22cab54644744 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Aug 2017 17:33:07 +0800 Subject: [PATCH 55/58] Add Backtrace for enforce --- paddle/platform/enforce.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index fd4adbd9de..26c8eb78e6 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -14,7 +14,9 @@ limitations under the License. */ #pragma once +#include #include +#include #include #include #include @@ -39,12 +41,22 @@ namespace platform { struct EnforceNotMet : public std::exception { std::exception_ptr exp_; std::string err_str_; - EnforceNotMet(std::exception_ptr e, const char* f, int l) : exp_(e) { + static constexpr int TRACE_STACK_LIMIT = 100; try { std::rethrow_exception(exp_); } catch (const std::exception& exp) { - err_str_ = string::Sprintf("%s at [%s:%d]", exp.what(), f, l); + std::ostringstream sout; + sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; + sout << "Call Stacks: " << std::endl; + void* call_stack[TRACE_STACK_LIMIT]; + int sz = backtrace(call_stack, TRACE_STACK_LIMIT); + auto line = backtrace_symbols(call_stack, sz); + for (int i = 0; i < sz; ++i) { + sout << line[i] << std::endl; + } + free(line); + err_str_ = sout.str(); } } From 90846f3c9d8db875522442cfadbd10c7f5710b12 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 1 Aug 2017 19:11:21 +0800 Subject: [PATCH 56/58] Add interface description into api documentation. --- doc/api/v2/config/layer.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst index daee55b7f9..ec7f1446cf 100644 --- a/doc/api/v2/config/layer.rst +++ b/doc/api/v2/config/layer.rst @@ -198,6 +198,10 @@ identity_projection .. autoclass:: paddle.v2.layer.identity_projection :noindex: +slice_projection +------------------- +.. 
autoclass:: paddle.v2.layer.slice_projection + :noindex: table_projection ---------------- From 051d6c86922c89a7c73ca4628ccceb9a1c09fdb9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Aug 2017 19:41:04 +0800 Subject: [PATCH 57/58] Merge develop --- paddle/framework/backward_test.cc | 4 ++-- paddle/framework/net_op_test.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 2e892f12fb..b095c2c3d5 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -23,8 +23,8 @@ namespace framework { class EmptyOp : public OperatorBase { public: - void InferShape(const std::shared_ptr &scope) const override {} - void Run(const std::shared_ptr &scope, + void InferShape(const Scope &scope) const override {} + void Run(const Scope &scope, const platform::DeviceContext &dev_ctx) const override {} }; diff --git a/paddle/framework/net_op_test.cc b/paddle/framework/net_op_test.cc index 5b5972b3b2..f32e456e5d 100644 --- a/paddle/framework/net_op_test.cc +++ b/paddle/framework/net_op_test.cc @@ -22,8 +22,8 @@ class TestOp : public OperatorBase { class EmptyOp : public OperatorBase { public: - void InferShape(const std::shared_ptr& scope) const override {} - void Run(const std::shared_ptr& scope, + void InferShape(const Scope& scope) const override {} + void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override {} }; From ec9d4d527e5f93e963e0d3b3c1c030cad3b0e375 Mon Sep 17 00:00:00 2001 From: Yancey Date: Tue, 1 Aug 2017 20:27:54 +0800 Subject: [PATCH 58/58] Add start_record interface (#3128) * add start_record interface * call master client in reader * update * add demo code in comments * update comments * delete unittest for recordio reader --- go/pserver/client/c/test/test_train.py | 27 ++++------- python/paddle/v2/master/client.py | 3 ++ python/paddle/v2/reader/creator.py | 48 +++++++++++-------- python/paddle/v2/reader/tests/creator_test.py | 9 ---- 4 files changed, 39 insertions(+), 48 deletions(-) diff --git a/go/pserver/client/c/test/test_train.py b/go/pserver/client/c/test/test_train.py index 85cb399590..572a61e4cc 100644 --- a/go/pserver/client/c/test/test_train.py +++ b/go/pserver/client/c/test/test_train.py @@ -3,24 +3,11 @@ import paddle.v2.dataset.uci_housing as uci_housing import paddle.v2.master as master import os import cPickle as pickle +from paddle.v2.reader.creator import cloud_reader etcd_ip = os.getenv("MASTER_IP", "127.0.0.1") -etcd_endpoint = "http://" + etcd_ip + ":2379" -print "connecting to master, etcd endpoints: ", etcd_endpoint -master_client = master.client(etcd_endpoint, 5, 64) - - -def cloud_reader(): - global master_client - master_client.set_dataset( - ["/pfs/dlnel/public/dataset/uci_housing/uci_housing-*"], passes=30) - while 1: - r, e = master_client.next_record() - if not r: - if e != -2: # other errors - print "get record error:", e - break - yield pickle.loads(r) +etcd_endpoints = "http://" + etcd_ip + ":2379" +print "etcd endpoints: ", etcd_endpoints def main(): @@ -49,7 +36,7 @@ def main(): parameters=parameters, update_equation=optimizer, is_local=False, - pserver_spec=etcd_endpoint, + pserver_spec=etcd_endpoints, use_etcd=True) # event_handler to print training and testing info @@ -75,7 +62,11 @@ def main(): trainer.train( reader=paddle.batch( paddle.reader.shuffle( - cloud_reader, buf_size=500), batch_size=2), + cloud_reader( + ["/pfs/dlnel/public/dataset/uci_housing/uci_housing*"], + etcd_endpoints), + 
buf_size=500), + batch_size=2), feeding={'x': 0, 'y': 1}, event_handler=event_handler, diff --git a/python/paddle/v2/master/client.py b/python/paddle/v2/master/client.py index b658a81630..fc718f031e 100644 --- a/python/paddle/v2/master/client.py +++ b/python/paddle/v2/master/client.py @@ -76,3 +76,6 @@ class client(object): # Memory created from C should be freed. get_c_lib().mem_free(ret.contents) return record, 0 + + def paddle_start_get_records(self, pass_id): + get_c_lib().paddle_start_get_records(self.c, pass_id) diff --git a/python/paddle/v2/reader/creator.py b/python/paddle/v2/reader/creator.py index 55a0fcdf56..d0f18e4b66 100644 --- a/python/paddle/v2/reader/creator.py +++ b/python/paddle/v2/reader/creator.py @@ -16,7 +16,7 @@ Creator package contains some simple reader creator, which could be used in user program. """ -__all__ = ['np_array', 'text_file', "recordio"] +__all__ = ['np_array', 'text_file', "cloud_reader"] def np_array(x): @@ -81,35 +81,41 @@ def recordio_local(paths, buf_size=100): return dec.buffered(reader, buf_size) -def recordio(paths, buf_size=100): +pass_num = 0 + + +def cloud_reader(paths, etcd_endpoints, timeout_sec=5, buf_size=64): """ - Creates a data reader that outputs record one one by one - from given local or cloud recordio path. + Create a data reader that yield a record one by one from + the paths: :path: path of recordio files. + :etcd_endpoints: the endpoints for etcd cluster :returns: data reader of recordio files. + + .. code-block:: python + from paddle.v2.reader.creator import cloud_reader + etcd_endpoints = "http://127.0.0.1:2379" + trainer.train.( + reader=cloud_reader(["/work/dataset/uci_housing/uci_housing*"], etcd_endpoints), + ) """ import os - import paddle.v2.master.client as cloud - - if "KUBERNETES_SERVICE_HOST" not in os.environ.keys(): - return recordio_local(paths) - - host_name = "MASTER_SERVICE_HOST" - if host_name not in os.environ.keys(): - raise Exception('not find ' + host_name + ' in environment variable.') - - addr = os.environ(host) + import cPickle as pickle + import paddle.v2.master as master + c = master.client(etcd_endpoints, timeout_sec, buf_size) + c.set_dataset(paths) def reader(): - c = cloud(addr, buf_size) - c.set_dataset(paths) + global pass_num + c.paddle_start_get_records(pass_num) + pass_num += 1 while True: - r, err = client.next_record() - if err < 0: + r, e = c.next_record() + if not r: + if e != -2: + print "get record error: ", e break - yield r - - c.release() + yield pickle.loads(r) return reader diff --git a/python/paddle/v2/reader/tests/creator_test.py b/python/paddle/v2/reader/tests/creator_test.py index b42d273ecf..359f3eeefb 100644 --- a/python/paddle/v2/reader/tests/creator_test.py +++ b/python/paddle/v2/reader/tests/creator_test.py @@ -34,14 +34,5 @@ class TestTextFile(unittest.TestCase): self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1)) -class TestRecordIO(unittest.TestCase): - def test_recordio(self): - path = os.path.join( - os.path.dirname(__file__), "test_recordio_creator.dat") - reader = paddle.v2.reader.creator.recordio([path]) - for idx, r in enumerate(reader()): - self.assertSequenceEqual(r, str(idx)) - - if __name__ == '__main__': unittest.main()
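For reference, the reader protocol introduced in creator.py above works as
follows: the master client is configured once with set_dataset(), each call of
the returned reader starts a new pass via paddle_start_get_records(pass_id),
and records are pulled with next_record() until it returns an empty record,
where error code -2 marks the normal end of a pass. A minimal sketch with a
stand-in client (illustrative only: it mimics just the three calls used in
creator.py, replaces the module-level pass_num with a closure counter, and
skips the pickle decoding):

    class FakeMasterClient(object):
        """Serves an in-memory record list once per pass."""

        def __init__(self, records):
            self._records = records
            self._pos = 0

        def set_dataset(self, paths):
            pass  # the real client registers the recordio paths with the master

        def paddle_start_get_records(self, pass_id):
            self._pos = 0  # rewind at the start of each pass

        def next_record(self):
            if self._pos >= len(self._records):
                return "", -2  # empty record plus -2 marks the end of this pass
            record = self._records[self._pos]
            self._pos += 1
            return record, 0


    def make_reader(client):
        state = {"pass_num": 0}

        def reader():
            client.paddle_start_get_records(state["pass_num"])
            state["pass_num"] += 1
            while True:
                r, e = client.next_record()
                if not r:
                    if e != -2:
                        raise IOError("get record error: %d" % e)
                    break
                yield r

        return reader


    reader = make_reader(FakeMasterClient(["r0", "r1", "r2"]))
    assert list(reader()) == ["r0", "r1", "r2"]  # first pass
    assert list(reader()) == ["r0", "r1", "r2"]  # reader() rewinds for the next pass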