From 39f14f1dd6fd6810472fd100ad59a1d1cdb661f1 Mon Sep 17 00:00:00 2001
From: zchen0211
Date: Wed, 9 Aug 2017 15:24:32 -0700
Subject: [PATCH 01/92] scatter update implemented

---
 paddle/operators/CMakeLists.txt  |  2 +
 paddle/operators/scatter.h       | 92 ++++++++++++++++++++++++++++++++
 paddle/operators/scatter_test.cc | 52 ++++++++++++++++++
 3 files changed, 146 insertions(+)
 create mode 100644 paddle/operators/scatter.h
 create mode 100644 paddle/operators/scatter_test.cc

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index e018a112a4..7ba9384fa8 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -43,6 +43,8 @@ endfunction()
 cc_test(gather_test SRCS gather_test.cc DEPS tensor)
 
+cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
+
 cc_library(net_op SRCS net_op.cc DEPS op_registry)
 cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h
new file mode 100644
index 0000000000..714c022c02
--- /dev/null
+++ b/paddle/operators/scatter.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include
+
+#include "paddle/framework/ddim.h"
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/tensor.h"
+#include "paddle/platform/place.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+template
+using EigenVector = framework::EigenVector;
+
+// Implementation of CPU copy
+template
+void CPUScatterUpdate(const paddle::framework::Tensor* src, const int* index,
+                      const size_t index_size,
+                      paddle::framework::Tensor* output) {
+  paddle::framework::DDim output_dims = output->dims();
+
+  for (size_t i = 0; i < index_size; ++i) {
+    int index_ = index[i];
+
+    paddle::framework::Tensor src_ = *src;
+    paddle::framework::Tensor output_ = *output;
+    if (index_size > 1) src_ = src->Slice(i, i + 1);
+    if (output_dims[0] > 1) output_ = output->Slice(index_, index_ + 1);
+
+    auto X = EigenVector::Flatten(src_);
+    auto Y = EigenVector::Flatten(output_);
+
+    Y = X + Y;
+  }
+}
+
+// Implementation of GPU scatter:
+template
+void GPUScatterUpdate(const T* src, const int* index, const int slice_size,
+                      const int index_size, T* output);
+
+/**
+ * Return a updated tensor from source tensor, scattered according to index:
+ * dst[i] += src[index[i]]
+ * input[src]: type-T source Tensor
+ * input[index]: type-int index Tensor (1-D)
+ * return: output tensor
+ */
+template
+void ScatterUpdate(const platform::Place& place,
+                   const paddle::framework::Tensor* src,
+                   const paddle::framework::Tensor* index,
+                   paddle::framework::Tensor* output) {
+  // check index of shape 1-D
+  PADDLE_ENFORCE(index->dims().size() == 1);
+  int index_size = index->dims()[0];
+
+  auto src_dims = src->dims();
+  auto dst_dims = output->dims();
+
+  // check src shape and dst shape should match
+  for (size_t i = 1; i < src_dims.size(); i++)
+    PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
+
+  // slice size
+  size_t slice_size = 1;
+  for (size_t i = 0; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+
+  if (platform::is_cpu_place(place)) {
+    CPUScatterUpdate(src, index->data(), index_size, output);
+  } else {
+  }
+}
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc
new file mode 100644
index 0000000000..4449ce6564
--- /dev/null
+++ b/paddle/operators/scatter_test.cc
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/operators/scatter.h"
+#include "paddle/framework/ddim.h"
+#include "paddle/framework/tensor.h"
+#include "paddle/platform/place.h"
+
+#include
+#include
+#include
+
+TEST(scatter, ScatterUpdate) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+  using namespace paddle::operators;
+
+  Tensor* src = new Tensor();
+  Tensor* index = new Tensor();
+  Tensor* output = new Tensor();
+
+  float* p_src = nullptr;
+  int* p_index = nullptr;
+  p_src = src->mutable_data(make_ddim({1, 4}), CPUPlace());
+  p_index = index->mutable_data(make_ddim({1}), CPUPlace());
+
+  for (size_t i = 0; i < 4; ++i) p_src[i] = float(i);
+  p_index[0] = 1;
+
+  float* p_output = output->mutable_data(make_ddim({4, 4}), CPUPlace());
+
+  ScatterUpdate(CPUPlace(), src, index, output);
+
+  for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], float(0));
+  for (size_t i = 0; i < 4; ++i) EXPECT_EQ(output->data()[i], float(0));
+  for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], float(i - 4));
+  for (size_t i = 4; i < 8; ++i)
+    EXPECT_EQ(output->data()[i], float(i - 4));
+  for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], float(0));
+  for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], float(0));
+}

From 03d0040c591dc5e682a686fb3ec89ae2c003b240 Mon Sep 17 00:00:00 2001
From: zchen0211
Date: Thu, 10 Aug 2017 16:32:23 -0700
Subject: [PATCH 02/92] gather warning fixed

---
 paddle/operators/gather.h       | 4 ++--
 paddle/operators/gather_test.cc | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/paddle/operators/gather.h b/paddle/operators/gather.h
index 0c73717d38..d6e6990394 100644
--- a/paddle/operators/gather.h
+++ b/paddle/operators/gather.h
@@ -29,7 +29,7 @@ void CPUGather(const T* params, const int* indices, const int slice_size,
                const int index_size, T* output) {
   const size_t slice_bytes = slice_size * sizeof(T);
 
-  for (size_t i = 0; i < index_size; ++i) {
+  for (int i = 0; i < index_size; ++i) {
     int index_ = indices[i];
     memcpy(output + i * slice_size, params + index_ * slice_size, slice_bytes);
   }
@@ -60,7 +60,7 @@ void Gather(const platform::Place& place, const paddle::framework::Tensor* src,
 
   // slice size
   int slice_size = 1;
-  for (size_t i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+  for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i];
 
   // Gathering
   if (platform::is_cpu_place(place)) {
diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc
index 5de748ec46..d24d83f299 100644
--- a/paddle/operators/gather_test.cc
+++ b/paddle/operators/gather_test.cc
@@ -35,7 +35,7 @@ TEST(Gather, GatherData) {
   p_src = src->mutable_data(make_ddim({3, 4}), CPUPlace());
   p_index = index->mutable_data(make_ddim({2}), CPUPlace());
 
-  for (size_t i = 0; i < 12; ++i) p_src[i] = i;
+  for (int i = 0; i < 12; ++i) p_src[i] = i;
   p_index[0] = 1;
   p_index[1] = 0;
 
@@ -43,6 +43,6 @@ TEST(Gather, GatherData) {
 
   Gather(CPUPlace(), src, index, output);
 
-  for (size_t i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
-  for (size_t i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
+  for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
+  for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
 }

From fb61512c5c9c72b8678757520eb9d283e9e24845 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Fri, 11 Aug 2017 14:31:32 +0800
Subject: [PATCH 03/92] add for test, revert me when function done

---
 paddle/operators/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt
index b3399aaf0f..02e4dda942 100644
--- a/paddle/operators/CMakeLists.txt
+++ b/paddle/operators/CMakeLists.txt
@@ -50,7 +50,7 @@ op_library(add_op SRCS add_op.cc add_op.cu)
 
 op_library(mean_op SRCS mean_op.cc mean_op.cu)
 
-op_library(mul_op SRCS mul_op.cc mul_op.cu)
+op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS cblas)
 op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc)
 
 op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu)

From ab08575adf4c13a874aeb7cb1ad7b3904ba45f82 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 11 Aug 2017 11:03:26 -0700
Subject: [PATCH 04/92] WIP

---
 paddle/framework/grad_op_builder.cc |  12 +-
 paddle/framework/op_registry.h      | 178 +++++++++++-----------------
 paddle/framework/pybind.cc          |  10 +-
 3 files changed, 85 insertions(+), 115 deletions(-)

diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc
index 6d032fb78f..ff8a5583af 100644
--- a/paddle/framework/grad_op_builder.cc
+++ b/paddle/framework/grad_op_builder.cc
@@ -76,8 +76,16 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
 }
 
 OperatorBase* BuildGradOp(const OperatorBase* op) {
-  std::string grad_op_type = OpRegistry::grad_ops().at(op->type_);
-  OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)();
+  auto it = op_info_map().find(op->type_);
+  PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
+                 "'%s' has not been registered.", op->type);
+  std::string grad_op_type = it->second.grad_op_type_;
+  PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.",
+                 op->type);
+  it = op_info_map().find(grad_op_type);
+  PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
+                 "'%s' has not been registered.", grad_op_type);
+  OperatorBase* grad_op = it->second.creator_();
   grad_op->type_ = grad_op_type;
   grad_op->attrs_ = op->attrs_;
   grad_op->attrs_.erase("input_format");
diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index 84bf325fed..b88559f82b 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -17,6 +17,7 @@ limitations under the License. */
 #include
 #include
 #include
+#include
 #include
 #include
 #include "paddle/framework/attribute.h"
@@ -174,6 +175,15 @@ Add a mark to which output is temporary is helpful for future optimization.
   bool has_temporary_output_{false};
 };
 
+class NOPMaker : public OpProtoAndCheckerMaker {};
+
+struct OpInfo {
+  std::function creator_;
+  std::string grad_op_type_;
+  OpProto* proto_;
+  OpAttrChecker* checker_;
+};
+
 class OpRegistry {
   using OpCreator = std::function;
   using VarIndexMap = std::unordered_map;
@@ -181,52 +191,55 @@ class OpRegistry {
 
  public:
   template
-  static void RegisterOp(const std::string& op_type) {
-    op_creators()[op_type] = [] { return new OpType; };
-    OpAttrChecker& op_checker = op_checkers()[op_type];
-    OpProto& op_proto = protos()[op_type];
-    auto maker = ProtoMakerType(&op_proto, &op_checker);
-    maker.Validate();
-    *op_proto.mutable_type() = op_type;
-    PADDLE_ENFORCE(
-        op_proto.IsInitialized(),
-        "Fail to initialize %s's OpProto, because %s is not initialized",
-        op_type, op_proto.InitializationErrorString());
-
-    VarIndexMaps()[op_type].reset(new VarIndexMap());
-    auto& varmap = *VarIndexMaps()[op_type];
-    int idx = 0;
-    for (auto& var : op_proto.inputs()) {
-      varmap[var.name()] = idx++;
-    }
-    idx = 0;
-    for (auto& var : op_proto.outputs()) {
-      varmap[var.name()] = idx++;
+  static void RegisterOp(const std::string& op_type,
+                         const std::string& grad_op_type) {
+    PADDLE_ENFORCE(op_info_map().count(op_type) == 0,
+                   "'%s' is registered more than once.", op_type);
+    OpInfo op_info;
+    op_info.creator_ = [] { return new OpType; };
+    op_info.grad_op_type_ = grad_op_type;
+    if (std::type_index(typeid(ProtoMakerType)) !=
+        std::type_index(typeid(NOPMaker))) {
+      op_info.proto_ = new OpProto;
+      op_info.op_checker_ = new OpAttrChecker;
+      auto maker = ProtoMakerType(op_info.proto_, op_info.op_checker_);
+      maker.Validate();
+      *op_info.proto_->mutable_type() = op_type;
+      PADDLE_ENFORCE(
+          op_info.proto_->IsInitialized(),
+          "Fail to initialize %s's OpProto, because %s is not initialized",
+          op_type, op_info.proto_->InitializationErrorString());
+      //======will be refactored in following PRs============//
+      VarIndexMaps()[op_type].reset(new VarIndexMap());
+      auto& varmap = *VarIndexMaps()[op_type];
+      int idx = 0;
+      for (auto& var : op_proto.inputs()) {
+        varmap[var.name()] = idx++;
+      }
+      idx = 0;
+      for (auto& var : op_proto.outputs()) {
+        varmap[var.name()] = idx++;
+      }
+      //================================================//
     }
-  }
-
-  template
-  static void RegisterGradOp(const std::string& op_type,
-                             const std::string& grad_op_type) {
-    op_creators()[grad_op_type] = [] { return new GradOpType; };
-    grad_ops()[op_type] = grad_op_type;
+    op_info_map.insert(std::make_pair(op_type, op_info));
   }
 
   static std::shared_ptr CreateOp(const std::string& type,
                                   const VarNameList& inputs,
                                   const VarNameList& outputs,
                                   const AttributeMap& attrs) {
-    auto op_create_it = op_creators().find(type);
-    PADDLE_ENFORCE(op_create_it != op_creators().end(),
-                   "Operator %s cannot be found.", type);
+    auto it = op_info_map().find(type);
+    PADDLE_ENFORCE(it != op_info_map().end(), "'%s' has not been registered.",
                   type);
 
-    auto op = op_create_it->second();
+    auto op = it->second.creator_();
     op->type_ = type;
     op->inputs_ = inputs;
     op->outputs_ = outputs;
 
     op->attrs_ = attrs;
-    op_checkers().at(type).Check(op->attrs_);
+    it->second.checker_->Check(op->attrs_);
 
     GenerateTempVariableName(op);
 
@@ -268,14 +281,9 @@ class OpRegistry {
     return grad_op;
   }
 
-  static std::unordered_map& protos() {
-    static std::unordered_map protos_;
-    return protos_;
-  }
-
-  static std::unordered_map& grad_ops() {
-    static std::unordered_map grad_ops_;
-    return grad_ops_;
+  static std::unordered_map& op_info_map() {
+    static std::unordered_map op_info_map_;
+    return op_info_map_;
   }
 
   static std::unordered_map>&
@@ -284,17 +292,7 @@ class OpRegistry {
     return maps_;
   }
 
-  static std::unordered_map& op_creators() {
-    static std::unordered_map op_creators_;
-    return op_creators_;
-  }
-
  private:
-  static std::unordered_map& op_checkers() {
-    static std::unordered_map op_checkers_;
-    return op_checkers_;
-  }
-
   static void GenerateTempVariableName(OperatorBase* op) {
     static std::atomic gUniqId(0UL);
     for (auto& outname : op->outputs_) {
@@ -323,16 +321,9 @@ class Registrar {
 template
 class OpRegistrar : public Registrar {
  public:
-  explicit OpRegistrar(const char* op_type) {
-    OpRegistry::RegisterOp(op_type);
-  }
-};
-
-template
-class GradOpRegistrar : public Registrar {
- public:
-  GradOpRegistrar(const char* op_type, const char* grad_op_type) {
-    OpRegistry::RegisterGradOp(op_type, grad_op_type);
+  OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); }
+  OpRegistrar(const char* op_type, const char* grad_op_type) {
+    OpRegistry::RegisterOp(op_type, grad_op_type);
   }
 };
 
@@ -358,30 +349,21 @@ class OpKernelRegistrar : public Registrar {
 /**
  * Macro to register Operator.
  */
-#define REGISTER_OP(op_type, op_class, op_maker_class)                        \
+#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type)          \
   STATIC_ASSERT_GLOBAL_NAMESPACE(                                             \
       __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
   static ::paddle::framework::OpRegistrar                                     \
-      __op_registrar_##op_type##__(#op_type);                                 \
+      __op_registrar_##op_type##__(#op_type, #grad_op_type);                  \
   int TouchOpRegistrar_##op_type() {                                          \
     __op_registrar_##op_type##__.Touch();                                     \
     return 0;                                                                 \
   }
 
-/**
- * Macro to register Gradient Operator.
- */
-#define REGISTER_GRADIENT_OP(op_type, grad_op_type, grad_op_class)           \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                                            \
-      __reg_gradient_op__##op_type##_##grad_op_type,                         \
-      "REGISTER_GRADIENT_OP must be called in global namespace");            \
-  static ::paddle::framework::GradOpRegistrar                                \
-      __op_gradient_registrar_##op_type##_##grad_op_type##__(#op_type,       \
-                                                             #grad_op_type); \
-  int TouchOpGradientRegistrar_##op_type() {                                 \
-    __op_gradient_registrar_##op_type##_##grad_op_type##__.Touch();          \
-    return 0;                                                                \
-  }
+#define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
+  REGISTER_OP(op_type, op_class, op_maker_class, )
+
+#define REGISTER_GRADIENT_OP(op_type, op_class) \
+  REGISTER_OP(op_type, op_class, ::paddle::framework::NOPMaker, )
 
 /**
  * Macro to register OperatorKernel.
  */
@@ -400,10 +382,12 @@ class OpKernelRegistrar : public Registrar {
 /**
  * Macro to Forbid user register Gradient Operator.
  */
+/*
 #define NO_GRADIENT(op_type)                           \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                      \
-      __reg_gradient_op__##op_type##_##op_type##_grad, \
-      "NO_GRADIENT must be called in global namespace")
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                      \
+      __reg_gradient_op__##op_type##_##op_type##_grad, \
+      "NO_GRADIENT must be called in global namespace")
+*/
 
 #define REGISTER_OP_GPU_KERNEL(op_type, ...) \
   REGISTER_OP_KERNEL(op_type, GPU, ::paddle::platform::GPUPlace, __VA_ARGS__)
@@ -423,23 +407,6 @@ class OpKernelRegistrar : public Registrar {
   static int use_op_itself_##op_type##_ __attribute__((unused)) = \
       TouchOpRegistrar_##op_type()
 
-// TODO(fengjiayi): Most ops' gradient op have not been compeleted. So we use
-// `NO_GRAD` to disable micro USE_OP_GRADIENT(op_type). Otherwise the code can't
-// be compiled. `NO_GRAD` should be removed after all gradient ops are
-// compeleted.
-#define NO_GRAD
-#ifndef NO_GRAD
-#define USE_OP_GRADIENT(op_type)                    \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                   \
-      __use_op_gradient_##op_type,                  \
-      "USE_OP_GRADIENT must be called in global namespace"); \
-  extern int TouchOpGradientRegistrar_##op_type();  \
-  static int use_op_gradient_##op_type##_ __attribute__((unused)) = \
-      TouchOpGradientRegistrar_##op_type()
-#else
-#define USE_OP_GRADIENT(op_type)
-#endif
-
 #define USE_OP_DEVICE_KERNEL(op_type, DEVICE_TYPE)         \
   STATIC_ASSERT_GLOBAL_NAMESPACE(                          \
       __use_op_kernel_##op_type##_##DEVICE_TYPE##__,       \
@@ -459,18 +426,13 @@ class OpKernelRegistrar : public Registrar {
   USE_OP_DEVICE_KERNEL(op_type, GPU)
 #endif
 
-#define USE_NO_GRAD_OP(op_type) \
-  USE_OP_ITSELF(op_type);       \
-  USE_OP_KERNEL(op_type)
+#define USE_CPU_ONLY_OP(op_type) \
+  USE_OP_ITSELF(op_type);        \
+  USE_OP_DEVICE_KERNEL(op_type, CPU);
 
-#define USE_CPU_OP(op_type)           \
-  USE_OP_ITSELF(op_type);             \
-  USE_OP_DEVICE_KERNEL(op_type, CPU); \
-  USE_OP_GRADIENT(op_type)
-
-#define USE_OP(op_type)    \
-  USE_NO_GRAD_OP(op_type); \
-  USE_OP_GRADIENT(op_type)
+#define USE_OP(op_type)   \
+  USE_OP_ITSELF(op_type); \
+  USE_OP_KERNEL(op_type)
 
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index c18d38d2f9..412b416266 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -173,13 +173,13 @@ All parameter, weight, gradient are variables in Paddle.
   //! @note: Be careful! PyBind will return std::string as an unicode, not
   //! Python str. If you want a str object, you should cast them in Python.
   m.def("get_all_op_protos", []() -> std::vector {
-    auto &protos = OpRegistry::protos();
+    auto &op_info_map = OpRegistry::op_info_map();
     std::vector ret_values;
-    for (auto it = protos.begin(); it != protos.end(); ++it) {
-      PADDLE_ENFORCE(it->second.IsInitialized(),
-                     "OpProto must all be initialized");
+    for (auto it = op_info_map.begin(); it != op_info_map.end(); ++it) {
+      const OpProto *proto = it->second.proto_;
+      PADDLE_ENFORCE(proto->IsInitialized(), "OpProto must all be initialized");
       std::string str;
-      PADDLE_ENFORCE(it->second.SerializeToString(&str),
+      PADDLE_ENFORCE(proto->SerializeToString(&str),
                     "Serialize OpProto Error. This could be a bug of Paddle.");
       ret_values.push_back(py::bytes(str));
     }

From 9430bc3207953aaade0417e667300886034db65d Mon Sep 17 00:00:00 2001
From: zchen0211
Date: Fri, 11 Aug 2017 11:57:46 -0700
Subject: [PATCH 05/92] fix all bugs

---
 paddle/operators/scatter.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/operators/scatter.h b/paddle/operators/scatter.h
index 714c022c02..6b542675c2 100644
--- a/paddle/operators/scatter.h
+++ b/paddle/operators/scatter.h
@@ -75,12 +75,12 @@ void ScatterUpdate(const platform::Place& place,
   auto dst_dims = output->dims();
 
   // check src shape and dst shape should match
-  for (size_t i = 1; i < src_dims.size(); i++)
+  for (int i = 1; i < src_dims.size(); i++)
     PADDLE_ENFORCE(src_dims[i] == dst_dims[i]);
 
   // slice size
   size_t slice_size = 1;
-  for (size_t i = 0; i < src_dims.size(); ++i) slice_size *= src_dims[i];
+  for (int i = 0; i < src_dims.size(); ++i) slice_size *= src_dims[i];
 
   if (platform::is_cpu_place(place)) {
     CPUScatterUpdate(src, index->data(), index_size, output);

From f784741d4aad7d57417fc60d9f956320c4779a9f Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 11 Aug 2017 11:59:22 -0700
Subject: [PATCH 06/92] Refine macro

---
 paddle/framework/backward_test.cc        | 27 ++++++++++++------------
 paddle/framework/grad_op_builder_test.cc |  8 +++----
 paddle/framework/op_registry_test.cc     |  8 +++----
 paddle/framework/operator_test.cc        | 10 +++++----
 paddle/framework/pybind.cc               |  4 ++--
 paddle/operators/add_op.cc               |  4 ++--
 paddle/operators/cross_entropy_op.cc     |  4 ++--
 paddle/operators/fc_op.cc                |  3 ++-
 paddle/operators/fill_zeros_like_op.cc   |  3 ++-
 paddle/operators/gaussian_random_op.cc   |  3 ++-
 paddle/operators/mean_op.cc              |  4 ++--
 paddle/operators/mul_op.cc               |  4 ++--
 paddle/operators/recurrent_op.cc         |  5 +++--
 paddle/operators/rowwise_add_op.cc       |  3 ++-
 paddle/operators/sgd_op.cc               |  2 +-
 paddle/operators/sigmoid_op.cc           |  4 ++--
 paddle/operators/softmax_op.cc           |  4 ++--
 paddle/operators/uniform_random_op.cc    |  4 ++--
 18 files changed, 56 insertions(+), 48 deletions(-)

diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 1677a3ed4c..38194b716d 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -150,19 +150,20 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;
 namespace ops = paddle::operators;
 using EnforceNotMet = paddle::platform::EnforceNotMet;
-REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker);
-REGISTER_GRADIENT_OP(rowwise_add, rowwise_add_grad, f::EmptyOp);
-REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker);
-REGISTER_GRADIENT_OP(mul, mul_grad, f::EmptyOp);
-REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker);
-REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, f::EmptyOp);
-REGISTER_OP(nograd, f::EmptyOp, f::NoGradOpMaker);
-REGISTER_OP(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
-REGISTER_OP(add, f::EmptyOp, f::AddOpMaker);
-REGISTER_GRADIENT_OP(add, add_grad, f::EmptyOp);
-REGISTER_OP(fc, f::FcOp, f::FcOpMaker);
-REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker);
-REGISTER_GRADIENT_OP(many_output_op, many_output_op_grad, f::EmptyOp);
+REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad);
+REGISTER_GRADIENT_OP(rowwise_add_grad, f::EmptyOp);
+REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad);
+REGISTER_GRADIENT_OP(mul_grad, f::EmptyOp);
+REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad);
+REGISTER_GRADIENT_OP(sigmoid_grad, f::EmptyOp);
+REGISTER_OP_WITHOUT_GRADIENT(nograd, f::EmptyOp, f::NoGradOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
+REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad);
+REGISTER_GRADIENT_OP(add_grad, f::EmptyOp);
+REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker);
+REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker,
+            many_output_op_grad);
+REGISTER_GRADIENT_OP(many_output_op_grad, f::EmptyOp);
 
 TEST(Backward, simple_op_grad) {
   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index f1ebbae52f..ad61b482e0 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -61,10 +61,10 @@ TEST(GradOpBuilder, AddTwo) {
   EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
 }
 
-REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker);
-REGISTER_GRADIENT_OP(mult_io, mult_io_grad, f::NOP);
-REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker);
-REGISTER_GRADIENT_OP(io_ignored, io_ignored_grad, f::NOP);
+REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad);
+REGISTER_GRADIENT_OP(mult_io_grad, f::NOP);
+REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad);
+REGISTER_GRADIENT_OP(io_ignored_grad, f::NOP);
 
 TEST(GradOpBuilder, MutiInOut) {
   f::AttributeMap attrs{{"input_format", std::vector{0, 1, 4, 5}},
diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc
index 9894928a7a..6f21ffc8a4 100644
--- a/paddle/framework/op_registry_test.cc
+++ b/paddle/framework/op_registry_test.cc
@@ -49,10 +49,10 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 }  // namespace framework
 }  // namespace paddle
 
-REGISTER_OP(cos_sim, paddle::framework::CosineOp,
-            paddle::framework::CosineOpProtoAndCheckerMaker);
-REGISTER_OP(my_test_op, paddle::framework::MyTestOp,
-            paddle::framework::MyTestOpProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(cos_sim, paddle::framework::CosineOp,
+                             paddle::framework::CosineOpProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(my_test_op, paddle::framework::MyTestOp,
+                             paddle::framework::MyTestOpProtoAndCheckerMaker);
 
 TEST(OpRegistry, CreateOp) {
   paddle::framework::OpDesc op_desc;
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index 387aada749..b1976a6514 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -54,8 +54,9 @@ class OpeWithoutKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
 }  // namespace framework
 }  // namespace paddle
 
-REGISTER_OP(test_operator, paddle::framework::OpWithoutKernelTest,
-            paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    test_operator, paddle::framework::OpWithoutKernelTest,
+    paddle::framework::OpeWithoutKernelTestProtoAndCheckerMaker);
 
 TEST(OperatorBase, all) {
   paddle::framework::OpDesc op_desc;
@@ -212,8 +213,9 @@ TEST(OpKernel, all) {
   ASSERT_EQ(paddle::framework::cpu_kernel_run_num, 1);
 }
 
-REGISTER_OP(op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest,
-            paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    op_multi_inputs_with_kernel, paddle::framework::OpWithKernelTest,
+    paddle::framework::OpKernelTestMultiInputsProtoAndCheckerMaker);
 REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
                        paddle::framework::CPUKernalMultiInputsTest);
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index 412b416266..0416793d3a 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -30,9 +30,9 @@ limitations under the License. */
 namespace py = pybind11;
 
 USE_OP(add_two);
-USE_CPU_OP(onehot_cross_entropy);
+USE_CPU_ONLY_OP(onehot_cross_entropy);
 USE_OP_ITSELF(fc);
-USE_NO_GRAD_OP(sgd);
+USE_OP(sgd);
 USE_OP(mul);
 USE_OP(mean);
 USE_OP(sigmoid);
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 086245ef62..e8e26cbe9b 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -55,8 +55,8 @@ class AddOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker);
-REGISTER_GRADIENT_OP(add_two, add_two_grad, ops::AddOpGrad);
+REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad);
+REGISTER_GRADIENT_OP(add_two_grad, ops::AddOpGrad);
 
 REGISTER_OP_CPU_KERNEL(add_two, ops::AddKernel);
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index c813d54e17..7d0e74e5e4 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -69,11 +69,11 @@ OnehotCrossEntropy Operator.
 
 namespace ops = paddle::operators;
 REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
-            ops::OnehotCrossEntropyOpMaker);
+            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy,
     ops::OnehotCrossEntropyOpKernel);
-REGISTER_GRADIENT_OP(onehot_cross_entropy, onehot_cross_entropy_grad,
+REGISTER_GRADIENT_OP(onehot_cross_entropy_grad,
                      ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy_grad,
    ops::OnehotCrossEntropyGradientOpKernel);
diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc
index 01a1a81206..9d32f327bf 100644
--- a/paddle/operators/fc_op.cc
+++ b/paddle/operators/fc_op.cc
@@ -73,4 +73,5 @@ USE_OP(sigmoid);
 USE_OP(softmax);
 
 namespace ops = paddle::operators;
-REGISTER_OP(fc, ops::FullyConnectedOp, ops::FullyConnectedOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fc, ops::FullyConnectedOp,
+                             ops::FullyConnectedOpMaker);
diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc
index 3759a88678..d6fd368b07 100644
--- a/paddle/operators/fill_zeros_like_op.cc
+++ b/paddle/operators/fill_zeros_like_op.cc
@@ -51,7 +51,8 @@ The output will have the same size with input.
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(fill_zeros_like, ops::FillZerosLikeOp, ops::FillZerosLikeOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, ops::FillZerosLikeOp,
+                             ops::FillZerosLikeOpMaker);
 REGISTER_OP_CPU_KERNEL(
     fill_zeros_like,
     ops::FillZerosLikeKernel);
diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc
index ef417ae2f0..0bbbeaa08a 100644
--- a/paddle/operators/gaussian_random_op.cc
+++ b/paddle/operators/gaussian_random_op.cc
@@ -78,5 +78,6 @@ Use to initialize tensor with gaussian random generator.
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(gaussian_random, ops::GaussianRandomOp, ops::GaussianRandomOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(gaussian_random, ops::GaussianRandomOp,
+                             ops::GaussianRandomOpMaker);
 REGISTER_OP_CPU_KERNEL(gaussian_random, ops::GaussianRandomKernel);
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 2ea049cb36..15e0708c46 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -50,9 +50,9 @@ class MeanGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker);
+REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad);
 REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel);
-REGISTER_GRADIENT_OP(mean, mean_grad, ops::MeanGradOp);
+REGISTER_GRADIENT_OP(mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean_grad,
                        ops::MeanGradKernel);
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index db81fd555d..60550a2742 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -65,7 +65,7 @@ class MulOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker);
-REGISTER_GRADIENT_OP(mul, mul_grad, ops::MulOpGrad);
+REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad);
+REGISTER_GRADIENT_OP(mul_grad, ops::MulOpGrad);
 
 REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel);
diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc
index 2438374205..91be1ce519 100644
--- a/paddle/operators/recurrent_op.cc
+++ b/paddle/operators/recurrent_op.cc
@@ -235,5 +235,6 @@ void RecurrentGradientOp::Init() {
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP(recurrent_op, paddle::operators::RecurrentOp,
-            paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    recurrent_op, paddle::operators::RecurrentOp,
+    paddle::operators::RecurrentAlgorithmProtoAndCheckerMaker);
diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc
index 55ed1c2f4c..262a4127ef 100644
--- a/paddle/operators/rowwise_add_op.cc
+++ b/paddle/operators/rowwise_add_op.cc
@@ -53,6 +53,7 @@ for i in xrange(X.shape[0]):
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(rowwise_add, ops::RowWiseAddOp, ops::RowWiseAddOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(rowwise_add, ops::RowWiseAddOp,
+                             ops::RowWiseAddOpMaker);
 REGISTER_OP_CPU_KERNEL(
     rowwise_add, ops::RowWiseAddKernel);
diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc
index f9a28ff8a6..94d0fe0466 100644
--- a/paddle/operators/sgd_op.cc
+++ b/paddle/operators/sgd_op.cc
@@ -52,6 +52,6 @@ param_out = param - learning_rate * grad;
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sgd, ops::SGDOp, ops::SGDOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker);
 REGISTER_OP_CPU_KERNEL(sgd,
                        ops::SGDOpKernel);
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index bc5e0bbb18..fb27ffbfa1 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -48,8 +48,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker);
-REGISTER_GRADIENT_OP(sigmoid, sigmoid_grad, ops::SigmoidOpGrad);
+REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad);
+REGISTER_GRADIENT_OP(sigmoid_grad, ops::SigmoidOpGrad);
 
 REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel);
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index 3dd4e86918..abc21337c5 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -64,9 +64,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 
 namespace ops = paddle::operators;
 
-REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker);
+REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad);
 REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel);
-REGISTER_GRADIENT_OP(softmax, softmax_grad, ops::SoftmaxOpGrad);
+REGISTER_GRADIENT_OP(softmax_grad, ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(
     softmax_grad, ops::SoftmaxGradKernel);
diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc
index 405b84b76d..37ec7fe427 100644
--- a/paddle/operators/uniform_random_op.cc
+++ b/paddle/operators/uniform_random_op.cc
@@ -78,7 +78,7 @@ Used to initialize tensor with uniform random generator.
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP(uniform_random, paddle::operators::UniformRandomOp,
-            paddle::operators::UniformRandomOpMaker);
+REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
+                             paddle::operators::UniformRandomOpMaker);
 REGISTER_OP_CPU_KERNEL(uniform_random,
                        paddle::operators::CPUUniformRandomKernel);

From 6768b31037161fa8a9979bd2b4294adbf11966c2 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 11 Aug 2017 13:43:31 -0700
Subject: [PATCH 07/92] Fix compile error

---
 paddle/framework/grad_op_builder.cc | 10 +++++-----
 paddle/framework/op_registry.h      | 29 ++++++++++++++++-------------
 paddle/framework/operator_test.cc   |  5 +++--
 3 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc
index ff8a5583af..f534b2c336 100644
--- a/paddle/framework/grad_op_builder.cc
+++ b/paddle/framework/grad_op_builder.cc
@@ -50,7 +50,7 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
   std::vector& dst_inout =
       dst_type == OpArgType::IN ? dst_op->inputs_ : dst_op->outputs_;
   std::vector* dst_format = GetOpFormat(dst_op, dst_type);
-  const OpProto& proto = OpRegistry::protos().at(src_op->type_);
+  const OpProto& proto = *(OpRegistry::op_info_map().at(src_op->type_).proto_);
   const auto& src_arg_list =
      src_type == OpArgType::IN ? proto.inputs() : proto.outputs();
@@ -76,13 +76,13 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op,
 }
 
 OperatorBase* BuildGradOp(const OperatorBase* op) {
-  auto it = op_info_map().find(op->type_);
+  auto it = OpRegistry::op_info_map().find(op->type_);
   PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
-                 "'%s' has not been registered.", op->type);
+                 "'%s' has not been registered.", op->type_);
   std::string grad_op_type = it->second.grad_op_type_;
   PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.",
-                 op->type);
-  it = op_info_map().find(grad_op_type);
+                 op->type_);
+  it = OpRegistry::op_info_map().find(grad_op_type);
   PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(),
                  "'%s' has not been registered.", grad_op_type);
   OperatorBase* grad_op = it->second.creator_();
diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index b88559f82b..69c5f549e3 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -175,17 +175,20 @@ Add a mark to which output is temporary is helpful for future optimization.
   bool has_temporary_output_{false};
 };
 
-class NOPMaker : public OpProtoAndCheckerMaker {};
+class NOPMaker : public OpProtoAndCheckerMaker {
+ public:
+  NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {}
+};
 
 struct OpInfo {
-  std::function creator_;
+  std::function creator_;
   std::string grad_op_type_;
   OpProto* proto_;
   OpAttrChecker* checker_;
 };
 
 class OpRegistry {
-  using OpCreator = std::function;
   using VarIndexMap = std::unordered_map;
   using VarNameList = std::vector;
 
@@ -201,28 +204,28 @@ class OpRegistry {
     if (std::type_index(typeid(ProtoMakerType)) !=
         std::type_index(typeid(NOPMaker))) {
       op_info.proto_ = new OpProto;
-      op_info.op_checker_ = new OpAttrChecker;
-      auto maker = ProtoMakerType(op_info.proto_, op_info.op_checker_);
+      op_info.checker_ = new OpAttrChecker;
+      auto maker = ProtoMakerType(op_info.proto_, op_info.checker_);
       maker.Validate();
       *op_info.proto_->mutable_type() = op_type;
       PADDLE_ENFORCE(
           op_info.proto_->IsInitialized(),
           "Fail to initialize %s's OpProto, because %s is not initialized",
           op_type, op_info.proto_->InitializationErrorString());
-      //======will be refactored in following PRs============//
+      // ======will be refactored in following PRs============ //
       VarIndexMaps()[op_type].reset(new VarIndexMap());
       auto& varmap = *VarIndexMaps()[op_type];
       int idx = 0;
-      for (auto& var : op_proto.inputs()) {
+      for (auto& var : op_info.proto_->inputs()) {
         varmap[var.name()] = idx++;
       }
       idx = 0;
-      for (auto& var : op_proto.outputs()) {
+      for (auto& var : op_info.proto_->outputs()) {
         varmap[var.name()] = idx++;
       }
-      //================================================//
+      // ================================================ //
     }
-    op_info_map.insert(std::make_pair(op_type, op_info));
+    op_info_map().insert(std::make_pair(op_type, op_info));
   }
 
   static std::shared_ptr CreateOp(const std::string& type,
@@ -281,8 +284,8 @@ class OpRegistry {
     return grad_op;
   }
 
-  static std::unordered_map& op_info_map() {
-    static std::unordered_map op_info_map_;
+  static std::unordered_map& op_info_map() {
+    static std::unordered_map op_info_map_;
     return op_info_map_;
   }
 
@@ -321,7 +324,7 @@ class Registrar {
 template
 class OpRegistrar : public Registrar {
  public:
-  OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); }
+  explicit OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); }
   OpRegistrar(const char* op_type, const char* grad_op_type) {
     OpRegistry::RegisterOp(op_type, grad_op_type);
   }
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index b1976a6514..3887cadc60 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -188,8 +188,9 @@ class CPUKernalMultiInputsTest : public OpKernel {
 }  // namespace framework
 }  // namespace paddle
 
-REGISTER_OP(op_with_kernel, paddle::framework::OpWithKernelTest,
-            paddle::framework::OpKernelTestProtoAndCheckerMaker);
+REGISTER_OP_WITHOUT_GRADIENT(
+    op_with_kernel, paddle::framework::OpWithKernelTest,
+    paddle::framework::OpKernelTestProtoAndCheckerMaker);
 REGISTER_OP_CPU_KERNEL(op_with_kernel, paddle::framework::CPUKernelTest);

From 2ea2fbea1bfb6f73c87f7029953ba8007e8cf4fb Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Fri, 11 Aug 2017 17:30:49 -0700
Subject: [PATCH 08/92] Merge REGISTER_OP and REGISTER_GRADIENT_OP

---
 paddle/framework/backward_test.cc        | 16 ++++++----------
 paddle/framework/grad_op_builder_test.cc | 13 ++-----------
 paddle/framework/op_registry.h           | 22 +++++++++++++---------
 paddle/framework/operator.h              |  7 +++++++
 paddle/operators/add_op.cc               |  3 +--
 paddle/operators/cross_entropy_op.cc     |  5 ++---
 paddle/operators/mean_op.cc              |  3 +--
 paddle/operators/mul_op.cc               |  4 +---
 paddle/operators/sigmoid_op.cc           |  5 ++---
 paddle/operators/softmax_op.cc           |  4 ++--
 10 files changed, 37 insertions(+), 45 deletions(-)

diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index 38194b716d..4136e2c36a 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -150,20 +150,16 @@ class AddOpMaker : public OpProtoAndCheckerMaker {
 namespace f = paddle::framework;
 namespace ops = paddle::operators;
 using EnforceNotMet = paddle::platform::EnforceNotMet;
-REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad);
-REGISTER_GRADIENT_OP(rowwise_add_grad, f::EmptyOp);
-REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad);
-REGISTER_GRADIENT_OP(mul_grad, f::EmptyOp);
-REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad);
-REGISTER_GRADIENT_OP(sigmoid_grad, f::EmptyOp);
+REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad,
+            f::EmptyOp);
+REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad, f::EmptyOp);
+REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad, f::EmptyOp);
 REGISTER_OP_WITHOUT_GRADIENT(nograd, f::EmptyOp, f::NoGradOpMaker);
 REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker);
-REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad);
-REGISTER_GRADIENT_OP(add_grad, f::EmptyOp);
+REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad, f::EmptyOp);
 REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker);
 REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker,
-            many_output_op_grad);
-REGISTER_GRADIENT_OP(many_output_op_grad, f::EmptyOp);
+            many_output_op_grad, f::EmptyOp);
 
 TEST(Backward, simple_op_grad) {
   auto fwd = f::OpRegistry::CreateOp("rowwise_add", {"X", "b"}, {"Out"}, {});
diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc
index f1ebbae52f..3d7f1a753d 100644
--- a/paddle/framework/grad_op_builder_test.cc
+++ b/paddle/framework/grad_op_builder_test.cc
@@ -8,13 +8,6 @@ USE_OP(add_two);
 namespace paddle {
 namespace framework {
 
-class NOP : public OperatorBase {
- public:
-  void InferShape(const Scope &scope) const override {}
-  void Run(const Scope &scope,
-           const platform::DeviceContext &dev_ctx) const override {}
-};
-
 class MutiInOutOpMaker : public OpProtoAndCheckerMaker {
  public:
   MutiInOutOpMaker(OpProto *proto, OpAttrChecker *op_checker)
@@ -61,10 +54,8 @@ TEST(GradOpBuilder, AddTwo) {
   EXPECT_EQ(grad_add_op->Output("Y@GRAD"), "y@GRAD");
 }
 
-REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad);
-REGISTER_GRADIENT_OP(mult_io_grad, f::NOP);
-REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad);
-REGISTER_GRADIENT_OP(io_ignored_grad, f::NOP);
+REGISTER_OP(mult_io, f::NOP, f::MutiInOutOpMaker, mult_io_grad, f::NOP);
+REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP);
 
 TEST(GradOpBuilder, MutiInOut) {
   f::AttributeMap attrs{{"input_format", std::vector{0, 1, 4, 5}},
diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index 69c5f549e3..080a7149bb 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -193,7 +193,7 @@ class OpRegistry {
   using VarNameList = std::vector;
 
  public:
-  template
+  template
   static void RegisterOp(const std::string& op_type,
                          const std::string& grad_op_type) {
     PADDLE_ENFORCE(op_info_map().count(op_type) == 0,
                    "'%s' is registered more than once.", op_type);
@@ -226,6 +226,10 @@ class OpRegistry {
       varmap[var.name()] = idx++;
     }
     // ================================================ //
     }
     op_info_map().insert(std::make_pair(op_type, op_info));
+    // register gradient op
+    if (!grad_op_type.empty()) {
+      RegisterOp(grad_op_type, "");
+    }
   }
 
   static std::shared_ptr CreateOp(const std::string& type,
@@ -321,12 +325,13 @@ class Registrar {
   void Touch() {}
 };
 
-template
+template
 class OpRegistrar : public Registrar {
  public:
   explicit OpRegistrar(const char* op_type) { OpRegistrar(op_type, ""); }
   OpRegistrar(const char* op_type, const char* grad_op_type) {
-    OpRegistry::RegisterOp(op_type, grad_op_type);
+    OpRegistry::RegisterOp(op_type,
+                           grad_op_type);
   }
 };
 
@@ -352,10 +357,12 @@ class OpKernelRegistrar : public Registrar {
 /**
  * Macro to register Operator.
  */
-#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type)          \
+#define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type,          \
                    grad_op_class)                                             \
   STATIC_ASSERT_GLOBAL_NAMESPACE(                                             \
       __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \
   static ::paddle::framework::OpRegistrar                                     \
       __op_registrar_##op_type##__(#op_type, #grad_op_type);                  \
   int TouchOpRegistrar_##op_type() {                                          \
     __op_registrar_##op_type##__.Touch();                                     \
     return 0;                                                                 \
   }
 
 #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
-  REGISTER_OP(op_type, op_class, op_maker_class, )
-
-#define REGISTER_GRADIENT_OP(op_type, op_class) \
-  REGISTER_OP(op_type, op_class, ::paddle::framework::NOPMaker, )
+  REGISTER_OP(op_type, op_class, op_maker_class, , ::paddle::framework::NOP)
 
 /**
  * Macro to register OperatorKernel.
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index f5d167a16e..13308e0dae 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -125,6 +125,13 @@ class OperatorBase {
   std::shared_ptr> in_out_idxs_;
 };
 
+class NOP : public OperatorBase {
+ public:
+  void InferShape(const Scope& scope) const override {}
+  void Run(const Scope& scope,
+           const platform::DeviceContext& dev_ctx) const override {}
+};
+
 class InferShapeContext {
  public:
   InferShapeContext(const OperatorBase& op, const Scope& scope)
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index e8e26cbe9b..447e7b3915 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -55,8 +55,7 @@ class AddOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad);
-REGISTER_GRADIENT_OP(add_two_grad, ops::AddOpGrad);
+REGISTER_OP(add_two, ops::AddOp, ops::AddOpMaker, add_two_grad, ops::AddOpGrad);
 
 REGISTER_OP_CPU_KERNEL(add_two, ops::AddKernel);
diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc
index 7d0e74e5e4..3dcaccd756 100644
--- a/paddle/operators/cross_entropy_op.cc
+++ b/paddle/operators/cross_entropy_op.cc
@@ -69,12 +69,11 @@ OnehotCrossEntropy Operator.
 
 namespace ops = paddle::operators;
 REGISTER_OP(onehot_cross_entropy, ops::OnehotCrossEntropyOp,
-            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad);
+            ops::OnehotCrossEntropyOpMaker, onehot_cross_entropy_grad,
+            ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy,
     ops::OnehotCrossEntropyOpKernel);
-REGISTER_GRADIENT_OP(onehot_cross_entropy_grad,
-                     ops::OnehotCrossEntropyGradientOp);
 REGISTER_OP_CPU_KERNEL(
     onehot_cross_entropy_grad,
     ops::OnehotCrossEntropyGradientOpKernel);
diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc
index 15e0708c46..c41208014a 100644
--- a/paddle/operators/mean_op.cc
+++ b/paddle/operators/mean_op.cc
@@ -50,9 +50,8 @@ class MeanGradOp : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad);
+REGISTER_OP(mean, ops::MeanOp, ops::MeanOpMaker, mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean, ops::MeanKernel);
-REGISTER_GRADIENT_OP(mean_grad, ops::MeanGradOp);
 REGISTER_OP_CPU_KERNEL(mean_grad,
                        ops::MeanGradKernel);
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index 60550a2742..0c4547f04d 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -65,7 +65,5 @@ class MulOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad);
-REGISTER_GRADIENT_OP(mul_grad, ops::MulOpGrad);
-
+REGISTER_OP(mul, ops::MulOp, ops::MulOpMaker, mul_grad, ops::MulOpGrad);
 REGISTER_OP_CPU_KERNEL(mul, ops::MulKernel);
diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc
index fb27ffbfa1..4f3a880b40 100644
--- a/paddle/operators/sigmoid_op.cc
+++ b/paddle/operators/sigmoid_op.cc
@@ -48,9 +48,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel {
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad);
-REGISTER_GRADIENT_OP(sigmoid_grad, ops::SigmoidOpGrad);
-
+REGISTER_OP(sigmoid, ops::SigmoidOp, ops::SigmoidOpMaker, sigmoid_grad,
+            ops::SigmoidOpGrad);
 REGISTER_OP_CPU_KERNEL(sigmoid, ops::SigmoidKernel);
 
 REGISTER_OP_CPU_KERNEL(
diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc
index abc21337c5..99bc5b77d1 100644
--- a/paddle/operators/softmax_op.cc
+++ b/paddle/operators/softmax_op.cc
@@ -64,9 +64,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
 
 namespace ops = paddle::operators;
 
-REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad);
+REGISTER_OP(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, softmax_grad,
+            ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(softmax, ops::SoftmaxKernel);
-REGISTER_GRADIENT_OP(softmax_grad, ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(
     softmax_grad, ops::SoftmaxGradKernel);

From e0ccc178bc3201d10c916eb2be2a6261c7472495 Mon Sep 17 00:00:00 2001
From: dong zhihong
Date: Sat, 12 Aug 2017 15:50:41 +0800
Subject: [PATCH 09/92] simplify docker script

---
 Dockerfile              | 17 +++++++++++------
 python/requirements.txt |  9 +++++++++
 2 files changed, 20 insertions(+), 6 deletions(-)
 create mode 100644 python/requirements.txt

diff --git a/Dockerfile b/Dockerfile
index 41b6729124..3ef3aa14f4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -35,7 +35,7 @@ RUN apt-get update && \
     apt-get clean -y
 
 # paddle is using numpy.flip, which is introduced since 1.12.0
-RUN pip --no-cache-dir install 'numpy>=1.12.0'
+# RUN pip --no-cache-dir install 'numpy>=1.12.0'
 
 # Install Go and glide
 RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
     tar -xz -C /usr/local && \
@@ -58,13 +58,18 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
 # FIXME: due to temporary ipykernel dependency issue, specify ipykernel jupyter
 # version util jupyter fixes this issue.
 RUN pip install --upgrade pip && \
-    pip install -U 'protobuf==3.1.0' && \
-    pip install -U wheel pillow BeautifulSoup && \
+    pip install -U wheel && \
     pip install -U docopt PyYAML sphinx && \
-    pip install -U sphinx-rtd-theme==0.1.9 recommonmark && \
-    pip install pre-commit 'requests==2.9.2' 'ipython==5.3.0' && \
+    pip install -U sphinx-rtd-theme==0.1.9 recommonmark
+
+RUN pip install pre-commit 'ipython==5.3.0' && \
     pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \
-    pip install opencv-python rarfile 'scipy>=0.19.0' 'nltk>=3.2.2'
+    pip install opencv-python
+
+# paddle is using numpy.flip, which is introduced since 1.12.0
+RUN pip --no-cache-dir install 'numpy>=1.12.0'
+COPY ./python/requirements.txt /root/
+RUN pip install -r /root/requirements.txt
 
 # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use
 # the solution in https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl-py2
diff --git a/python/requirements.txt b/python/requirements.txt
new file mode 100644
index 0000000000..3df822bd76
--- /dev/null
+++ b/python/requirements.txt
@@ -0,0 +1,9 @@
+requests==2.9.2
+numpy>=1.12
+protobuf==3.1
+recordio
+matplotlib
+rarfile
+scipy>=0.19.0
+Pillow
+nltk>=3.2.2

From 6f045f9a04547f3627204c56c6bac108a0825507 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Sun, 13 Aug 2017 21:52:16 +0800
Subject: [PATCH 10/92] add mkl shared lib into whl

---
 paddle/scripts/submit_local.sh.in |  2 ++
 python/CMakeLists.txt             | 14 +++++++++++++-
 python/setup.py.in                | 10 +++++++++-
 3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in
index 12bf629ea9..2ab7d5b52f 100755
--- a/paddle/scripts/submit_local.sh.in
+++ b/paddle/scripts/submit_local.sh.in
@@ -18,6 +18,8 @@ function version(){
   echo "PaddlePaddle @PADDLE_VERSION@, compiled with"
   echo "    with_avx: @WITH_AVX@"
   echo "    with_gpu: @WITH_GPU@"
+  echo "    with_mkldnn: @WITH_MKLDNN"
+  echo "    with_mklml: @WITH_MKLML@"
   echo "    with_double: @WITH_DOUBLE@"
   echo "    with_python: @WITH_PYTHON@"
   echo "    with_rdma: @WITH_RDMA@"
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 16c519d45a..d2f064bea0 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -21,6 +21,18 @@ if(WITH_GOLANG)
   add_dependencies(copy_paddle_master paddle_master)
 endif(WITH_GOLANG)
 
+set(MKL_SHARED_LIBS "")
+set(MKL_DEPENDS "")
+if(WITH_MKLML)
+  list(APPEND MKL_SHARED_LIBS ${MKLML_LIB} ${MKLML_IOMP_LIB})
+  list(APPEND MKL_DEPENDS mklml)
+endif()
+
+if(WITH_MKLDNN)
+  list(APPEND MKL_SHARED_LIBS "${MKLDNN_LIB}" "${MKLDNN_LIB}.0")
+  list(APPEND MKL_DEPENDS mkldnn)
+endif()
+
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
     ${CMAKE_CURRENT_BINARY_DIR}/setup.py)
 
@@ -39,7 +51,7 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp
     DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER})
 
 add_custom_target(paddle_python ALL DEPENDS
-    ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel)
+    ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel ${MKL_DEPENDS})
 
 set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/)
 
diff --git a/python/setup.py.in b/python/setup.py.in
index 38728aa2fd..4b3fd1a779 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -23,6 +23,13 @@ with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f:
 if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']:
     setup_requires+=["opencv-python"]
 
+mkl_shared_libs='${MKL_SHARED_LIBS}'
+
+mkl_libs = []
+if mkl_shared_libs != '':
+    mkl_libs += mkl_shared_libs.split(';')
+print mkl_libs
+
 setup(name='paddlepaddle',
       version='${PADDLE_VERSION}',
       description='Parallel Distributed Deep Learning',
@@ -46,5 +53,6 @@ setup(name='paddlepaddle',
                 ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage',
                  '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer',
                  '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model',
-                 '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'])]
+                 '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main']),
+                ('/usr/local/opt/paddle/lib', mkl_libs)]
 )

From 19dfe1f38bcb100cc8e3e1b50ef891b32d614223 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Sun, 13 Aug 2017 13:57:49 -0700
Subject: [PATCH 11/92] fix compile errors

---
 paddle/framework/operator.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index 058fe3282f..c8c49b5455 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -55,6 +55,16 @@ class OperatorBase;
 class InferShapeContext;
 class ExecutionContext;
 
+#define DEFINE_OPERATOR_CTOR(Class, ParentClass)                     \
+ public:                                                             \
+  Class() { /* TODO(yi): This constructor is to be removed. */       \
+  }                                                                  \
+  Class(const std::string& type, const std::vector& inputs,          \
+        const std::vector& outputs,                                  \
+        const ::paddle::framework::AttributeMap& attrs,              \
+        std::unordered_map* in_out_idxs)                             \
+      : ParentClass(type, inputs, outputs, attrs, in_out_idxs) {}
+
 /**
  * OperatorBase has the basic element that Net will call to do computation.
  * Only CreateOperator from OpRegistry will new Operator directly. User
@@ -366,15 +376,5 @@ class OperatorWithKernel : public OperatorBase {
   virtual void InferShape(const InferShapeContext& ctx) const = 0;
 };
 
-#define DEFINE_OPERATOR_CTOR(Class, ParentClass)                     \
- public:                                                             \
-  Class() { /* TODO(yi): This constructor is to be removed. */       \
-  }                                                                  \
-  Class(const std::string& type, const std::vector& inputs,          \
-        const std::vector& outputs,                                  \
-        const ::paddle::framework::AttributeMap& attrs,              \
-        std::unordered_map* in_out_idxs)                             \
-      : ParentClass(type, inputs, outputs, attrs, in_out_idxs) {}
-
 }  // namespace framework
 }  // namespace paddle

From fb6bec6a8f5d8cb57773e2ca1e438476fa695892 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Sun, 13 Aug 2017 15:19:40 -0700
Subject: [PATCH 12/92] Fix a bug

---
 paddle/framework/op_registry.h | 3 +++
 paddle/framework/pybind.cc     | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h
index a561b5f48e..23f641cba2 100644
--- a/paddle/framework/op_registry.h
+++ b/paddle/framework/op_registry.h
@@ -222,6 +222,9 @@ class OpRegistry {
         varmap[var.name()] = idx++;
       }
       // ================================================ //
+    } else {
+      op_info.proto_ = nullptr;
+      op_info.checker_ = nullptr;
     }
     op_info_map().insert(std::make_pair(op_type, op_info));
     // register gradient op
diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc
index 3343a51c8d..56a89d87fd 100644
--- a/paddle/framework/pybind.cc
+++ b/paddle/framework/pybind.cc
@@ -176,6 +176,9 @@ All parameter, weight, gradient are variables in Paddle.
     std::vector ret_values;
     for (auto it = op_info_map.begin(); it != op_info_map.end(); ++it) {
       const OpProto *proto = it->second.proto_;
+      if (proto == nullptr) {
+        continue;
+      }
       PADDLE_ENFORCE(proto->IsInitialized(), "OpProto must all be initialized");
       std::string str;
       PADDLE_ENFORCE(proto->SerializeToString(&str),

From 0405e88440aac1788b1bb9eef3303fe301842f9c Mon Sep 17 00:00:00 2001
From: dong zhihong
Date: Mon, 14 Aug 2017 14:43:14 +0800
Subject: [PATCH 13/92] fix numpy docker

---
 Dockerfile | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 3ef3aa14f4..885bec9ba0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -34,9 +34,6 @@ RUN apt-get update && \
     net-tools && \
     apt-get clean -y
 
-# paddle is using numpy.flip, which is introduced since 1.12.0
-# RUN pip --no-cache-dir install 'numpy>=1.12.0'
-
 # Install Go and glide
 RUN wget -qO- https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | \
     tar -xz -C /usr/local && \
@@ -67,7 +64,7 @@ RUN pip install pre-commit 'ipython==5.3.0' && \
     pip install opencv-python
 
 # paddle is using numpy.flip, which is introduced since 1.12.0
-RUN pip --no-cache-dir install 'numpy>=1.12.0'
+# RUN pip --no-cache-dir install 'numpy>=1.12.0'
 COPY ./python/requirements.txt /root/
 RUN pip install -r /root/requirements.txt
 

From 07fdaf79b6eac37c4aa76081229490c1a0242a7e Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Mon, 14 Aug 2017 15:12:16 +0800
Subject: [PATCH 14/92] 1. use local lib as runtime path of paddle for mkl shared libs. 2.
fix path of bin files --- python/setup.py.in | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/python/setup.py.in b/python/setup.py.in index 4b3fd1a779..36438d3573 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -23,12 +23,20 @@ with open('@PADDLE_SOURCE_DIR@/python/requirements.txt') as f: if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"] -mkl_shared_libs='${MKL_SHARED_LIBS}' +# the prefix is sys.prefix which should always be usr +paddle_bin_dir = 'local/opt/paddle/bin' +paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage', + '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer', + '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model', + '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'] + +paddle_rt_lib_dir = 'local/lib' +paddle_rt_libs = [] -mkl_libs = [] +mkl_shared_libs='${MKL_SHARED_LIBS}' if mkl_shared_libs != '': - mkl_libs += mkl_shared_libs.split(';') -print mkl_libs + paddle_rt_libs += mkl_shared_libs.split(';') +print paddle_rt_libs setup(name='paddlepaddle', version='${PADDLE_VERSION}', @@ -49,10 +57,6 @@ setup(name='paddlepaddle', }, scripts=['${PADDLE_BINARY_DIR}/paddle/scripts/paddle'], distclass=BinaryDistribution, - data_files=[('/usr/local/opt/paddle/bin', - ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage', - '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer', - '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model', - '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main']), - ('/usr/local/opt/paddle/lib', mkl_libs)] + data_files=[(paddle_bin_dir, paddle_bins), + (paddle_rt_lib_dir, paddle_rt_libs)] ) From d9ea9047b4432d5367000633032ac7e58bf164c8 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 14 Aug 2017 15:19:21 +0800 Subject: [PATCH 15/92] Revert "add for test, revert me when function done" This reverts commit fb61512c5c9c72b8678757520eb9d283e9e24845. --- paddle/operators/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 13bdf321e5..c181bd7b88 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -50,7 +50,7 @@ op_library(add_op SRCS add_op.cc add_op.cu) op_library(mean_op SRCS mean_op.cc mean_op.cu) -op_library(mul_op SRCS mul_op.cc mul_op.cu DEPS cblas) +op_library(mul_op SRCS mul_op.cc mul_op.cu) op_library(rowwise_add_op SRCS rowwise_add_op.cu rowwise_add_op.cc) op_library(sigmoid_op SRCS sigmoid_op.cc sigmoid_op.cu) From c7372256f2727461252f41124cf55ab02bd96e84 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 14 Aug 2017 15:34:15 +0800 Subject: [PATCH 16/92] open MKLDNN and MKLML as default --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c75b83e50c..dcd1218a5b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,8 +36,8 @@ include(simd) ################################ Configurations ####################################### option(WITH_GPU "Compile PaddlePaddle with NVIDIA GPU" ${CUDA_FOUND}) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) -option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." OFF) -option(WITH_MKLML "Compile PaddlePaddle with mklml package." OFF) +option(WITH_MKLDNN "Compile PaddlePaddle with mkl-dnn support." ${AVX_FOUND}) +option(WITH_MKLML "Compile PaddlePaddle with mklml package." 
${AVX_FOUND}) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) From b2e3824e4149e592635e1938188415b663446a8d Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 15:34:38 +0800 Subject: [PATCH 17/92] change operator --- paddle/framework/op_registry.h | 25 ++++++++++++------------- paddle/framework/operator.h | 6 ++++-- paddle/operators/net_op.cc | 4 ++-- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index e93ee14425..55cf7fbe31 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -120,8 +120,10 @@ class OpProtoAndCheckerMaker { }; class OpRegistry { - using OpCreator = std::function; using VarNameMap = OperatorBase::VarNameMap; + using OpCreator = std::function; public: template @@ -153,14 +155,9 @@ class OpRegistry { PADDLE_ENFORCE(op_create_it != op_creators().end(), "Operator %s cannot be found.", type); - auto op = op_create_it->second(); - op->type_ = type; - op->inputs_ = inputs; - op->outputs_ = outputs; - - op->attrs_ = attrs; - op_checkers().at(type).Check(op->attrs_); - + auto attrMap = attrs; + op_checkers().at(type).Check(attrMap); + auto op = op_create_it->second(type, inputs, outputs, attrMap); GenerateTempVariableName(op); op->Init(); @@ -217,12 +214,14 @@ class OpRegistry { static void GenerateTempVariableName(OperatorBase* op) { static std::atomic gUniqId(0UL); - for (auto& output : op->outputs_) { + for (auto& output : op->Outputs()) { for (auto& output_name : output.second) { if (output_name == kTempVarName) { - output_name += op->type_; - output_name += "@"; - output_name += std::to_string(gUniqId.fetch_add(1)); + auto new_name = output_name; + new_name += op->Type(); + new_name += "@"; + new_name += std::to_string(gUniqId.fetch_add(1)); + op->Rename(output_name, new_name); } } } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index e145649d30..038e6fe7a2 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -105,6 +105,8 @@ class OperatorBase { /// rename inputs outputs name void Rename(const std::string& old_name, const std::string& new_name); + const VarNameMap& Inputs() const { return inputs_; } + const VarNameMap& Outputs() const { return outputs_; } //! Get a input with argument's name described in `op_proto` const std::string& Input(const std::string& name) const; //! Get a input which has multiple variables. 
@@ -118,10 +120,10 @@ class OperatorBase { virtual std::vector OutputVars(bool has_intermediate) const; - std::string Type() const { return type_; } + const std::string& Type() const { return type_; } const AttributeMap& Attrs() const { return attrs_; } - public: + protected: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: // I (Inputs) diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc index 6a118087a7..61e1377af8 100644 --- a/paddle/operators/net_op.cc +++ b/paddle/operators/net_op.cc @@ -29,7 +29,7 @@ void NetOp::CompleteAddOp(bool calc) { std::set input_set; std::set output_set; for (auto& op : ops_) { - for (auto& ipt : op->inputs_) { + for (auto& ipt : op->Inputs()) { for (auto& var_name : ipt.second) { if (!Contains(output_set, var_name)) { // Not other op's output input_set.insert(var_name); @@ -39,7 +39,7 @@ void NetOp::CompleteAddOp(bool calc) { } } - for (auto& opt : op->outputs_) { + for (auto& opt : op->Outputs()) { for (auto& var_name : opt.second) { output_set.insert(var_name); } From b4755c5aa7ede517bf9bc559e9247c050c6711f2 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 14 Aug 2017 15:50:28 +0800 Subject: [PATCH 18/92] Demangle exception call stack for PADDLE_ENFORCE --- paddle/platform/enforce.h | 47 ++++++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 337a059fb1..aa0660df88 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -14,14 +14,20 @@ limitations under the License. */ #pragma once -#include +#include // for dladdr +#include // for backtrace #include #include #include #include + #include "paddle/string/printf.h" #include "paddle/string/to_string.h" +#ifdef __GNUC__ +#include // for __cxa_demangle +#endif + #ifndef PADDLE_ONLY_CPU #include "paddle/platform/dynload/cublas.h" @@ -39,6 +45,19 @@ limitations under the License. */ namespace paddle { namespace platform { +namespace { +#ifdef __GNUC__ +inline std::string demangle(std::string name) { + int status = -4; // some arbitrary value to eliminate the compiler warning + std::unique_ptr res{ + abi::__cxa_demangle(name.c_str(), NULL, NULL, &status), std::free}; + return (status == 0) ? 
res.get() : name; +} +#else +inline std::string demangle(std::string name) { return name; } +#endif +} + struct EnforceNotMet : public std::exception { std::exception_ptr exp_; std::string err_str_; @@ -48,15 +67,27 @@ struct EnforceNotMet : public std::exception { std::rethrow_exception(exp_); } catch (const std::exception& exp) { std::ostringstream sout; + sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; - sout << "Call Stacks: " << std::endl; + sout << "PaddlePaddle Call Stacks: " << std::endl; + void* call_stack[TRACE_STACK_LIMIT]; - int sz = backtrace(call_stack, TRACE_STACK_LIMIT); - auto line = backtrace_symbols(call_stack, sz); - for (int i = 0; i < sz; ++i) { - sout << line[i] << std::endl; + auto size = backtrace(call_stack, TRACE_STACK_LIMIT); + auto symbols = backtrace_symbols(call_stack, size); + + Dl_info info; + for (int i = 0; i < size; ++i) { + if (dladdr(call_stack[i], &info)) { + auto demangled = demangle(info.dli_sname); + sout << string::Sprintf( + "%-3d %*0p %s + %zd\n", i, 2 + sizeof(void*) * 2, call_stack[i], + demangled, (char*)call_stack[i] - (char*)info.dli_saddr); + } else { + sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2, + call_stack[i]); + } } - free(line); + free(symbols); err_str_ = sout.str(); } } @@ -170,7 +201,7 @@ inline void throw_on_error(T e) { * PADDLE_ENFORCE_EQ(a, b); * * will raise an expression described as follows: - * "enforce a == b failed, 1 != 2" with detailed stack infomation. + * "enforce a == b failed, 1 != 2" with detailed stack information. * * extra messages is also supported, for example: * PADDLE_ENFORCE(a, b, "some simple enforce failed between %d numbers", 2) From 8210bcea6116be008aa027c6e868acbf5ebfeda6 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 14 Aug 2017 15:59:02 +0800 Subject: [PATCH 19/92] Using static_cast to replace (char*) --- paddle/platform/enforce.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index aa0660df88..cc38dc4ffe 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -79,9 +79,11 @@ struct EnforceNotMet : public std::exception { for (int i = 0; i < size; ++i) { if (dladdr(call_stack[i], &info)) { auto demangled = demangle(info.dli_sname); - sout << string::Sprintf( - "%-3d %*0p %s + %zd\n", i, 2 + sizeof(void*) * 2, call_stack[i], - demangled, (char*)call_stack[i] - (char*)info.dli_saddr); + auto addr_offset = static_cast(call_stack[i]) - + static_cast(info.dli_saddr); + sout << string::Sprintf("%-3d %*0p %s + %zd\n", i, + 2 + sizeof(void*) * 2, call_stack[i], + demangled, addr_offset); } else { sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2, call_stack[i]); From ffbb4c19634778d5380954dd55f4f56ec17b3859 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 14 Aug 2017 16:27:25 +0800 Subject: [PATCH 20/92] memory.h for unique_ptr --- paddle/platform/enforce.h | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index cc38dc4ffe..15fdf7a94f 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include // for dladdr #include // for backtrace #include +#include #include #include #include From 186fb0c1185b6b1b94a7eeac54fa1cbd001debfd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 14 Aug 2017 16:31:54 +0800 Subject: [PATCH 21/92] Remove input_format in backward.cc --- paddle/framework/backward.cc | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 315bdde76d..855e2cae20 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -127,11 +127,8 @@ std::shared_ptr BackwardRecursive( net->ops_[op_offset]->Rename(name, dup_outputs.back()); } insert_position.push_back( - {dup_op.back(), - OpRegistry::CreateOp( - "add", {{"X", {dup_outputs}}}, {{"Out", {name}}}, - {{"input_format", - std::vector{0, static_cast(dup_outputs.size())}}})}); + {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, + {{"Out", {name}}}, {})}); } insert_position.sort( @@ -140,7 +137,6 @@ std::shared_ptr BackwardRecursive( for (auto& pos : insert_position) { net->InsertOp(pos.first + 1, pos.second); } - } else { std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); @@ -176,7 +172,7 @@ std::shared_ptr BackwardRecursive( net->type_ = "@GENERATED_BACKWARD@"; net->CompleteAddOp(); return net; -} +} // namespace framework // See header for comments std::shared_ptr Backward( From 5d33ef61388aa022d58176f06c86285e8a06322c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 17:08:46 +0800 Subject: [PATCH 22/92] change op_register and grad_op_builder --- paddle/framework/grad_op_builder.cc | 38 +++++++++++++++++------------ paddle/framework/op_registry.h | 12 +++++++-- 2 files changed, 32 insertions(+), 18 deletions(-) diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index 7319fcc88c..048864c700 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -13,22 +13,22 @@ express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/framework/grad_op_builder.h" -#include "paddle/framework/framework.pb.h" #include "paddle/framework/op_registry.h" namespace paddle { namespace framework { enum class OpArgType { IN, OUT }; -static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op, - const OpArgType& src_type, const OpArgType& dst_type, - bool is_grad) { +using VarNameMap = OperatorBase::VarNameMap; + +static VarNameMap TransOpArg(const OperatorBase* src_op, + const OpArgType& src_type, + const OpArgType& dst_type, bool is_grad) { const auto& src_inout = - src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_; - auto& dst_inout = - dst_type == OpArgType::IN ? dst_op->inputs_ : dst_op->outputs_; + src_type == OpArgType::IN ? src_op->Inputs() : src_op->Outputs(); + VarNameMap dst_inout; - const OpProto& proto = OpProtos().at(src_op->type_); + const OpProto& proto = OpProtos().at(src_op->Type()); const auto& src_arg_list = src_type == OpArgType::IN ? 
proto.inputs() : proto.outputs(); for (const auto& arg : src_arg_list) { @@ -41,17 +41,23 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase* dst_op, dst_inout[dst_name].emplace_back(s); } } + return dst_inout; } OperatorBase* BuildGradOp(const OperatorBase* op) { - std::string grad_op_type = OpRegistry::grad_ops().at(op->type_); - OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)(); - grad_op->type_ = grad_op_type; - grad_op->attrs_ = op->attrs_; - TransOpArg(op, grad_op, OpArgType::IN, OpArgType::IN, false); // I - TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, false); // O - TransOpArg(op, grad_op, OpArgType::OUT, OpArgType::IN, true); // OG - TransOpArg(op, grad_op, OpArgType::IN, OpArgType::OUT, true); // IG + std::string grad_op_type = OpRegistry::grad_ops().at(op->Type()); + auto I = TransOpArg(op, OpArgType::IN, OpArgType::IN, false); // I + auto O = TransOpArg(op, OpArgType::OUT, OpArgType::IN, false); // O + auto OG = TransOpArg(op, OpArgType::OUT, OpArgType::IN, true); // OG + auto IG = TransOpArg(op, OpArgType::IN, OpArgType::OUT, true); // IG + // TODO(merge I/O/OG) + VarNameMap GradIn; + GradIn.insert(I.begin(), I.end()); + GradIn.insert(O.begin(), O.end()); + GradIn.insert(OG.begin(), OG.end()); + + OperatorBase* grad_op = OpRegistry::op_creators().at(grad_op_type)( + grad_op_type, GradIn, IG, op->Attrs()); return grad_op; } diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 55cf7fbe31..ffd48160b8 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -128,7 +128,11 @@ class OpRegistry { public: template static void RegisterOp(const std::string& op_type) { - op_creators()[op_type] = [] { return new OpType; }; + op_creators()[op_type] = []( + const std::string& type, const VarNameMap& inputs, + const VarNameMap& outputs, const AttributeMap& attrs) { + return new OpType(type, inputs, outputs, attrs); + }; OpAttrChecker& op_checker = op_checkers()[op_type]; OpProto& op_proto = OpProtos()[op_type]; auto maker = ProtoMakerType(&op_proto, &op_checker); @@ -143,7 +147,11 @@ class OpRegistry { template static void RegisterGradOp(const std::string& op_type, const std::string& grad_op_type) { - op_creators()[grad_op_type] = [] { return new GradOpType; }; + op_creators()[grad_op_type] = []( + const std::string& type, const VarNameMap& inputs, + const VarNameMap& outputs, const AttributeMap& attrs) { + return new GradOpType(type, inputs, outputs, attrs); + }; grad_ops()[op_type] = grad_op_type; } From 32a60971f05da4e65b913752608fd0ec68d028a0 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 14 Aug 2017 17:45:26 +0800 Subject: [PATCH 23/92] Fix pnpair_evaluator. --- .../trainer_config_helpers/evaluators.py | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/python/paddle/trainer_config_helpers/evaluators.py b/python/paddle/trainer_config_helpers/evaluators.py index 567521ee9d..e272f76a81 100644 --- a/python/paddle/trainer_config_helpers/evaluators.py +++ b/python/paddle/trainer_config_helpers/evaluators.py @@ -230,9 +230,8 @@ def auc_evaluator( def pnpair_evaluator( input, label, - info, - name=None, - weight=None, ): + weight, + name=None, ): """ Positive-negative pair rate Evaluator which adapts to rank task like learning to rank. This evaluator must contain at least three layers. @@ -241,27 +240,24 @@ def pnpair_evaluator( .. 
code-block:: python - eval = pnpair_evaluator(input, info, label) + eval = pnpair_evaluator(input, label, weight) - :param name: Evaluator name. - :type name: None|basestring :param input: Input Layer name. The output prediction of network. :type input: LayerOutput :param label: Label layer name. :type label: LayerOutput - :param info: Label layer name. (TODO, explaination) - :type info: LayerOutput :param weight: Weight Layer name. It should be a matrix with size [sample_num, 1]. (TODO, explaination) :type weight: LayerOutput + :param name: Evaluator name. + :type name: None|basestring """ evaluator_base( - name=name, - type="pnpair", input=input, + type="pnpair", label=label, - info=info, - weight=weight) + weight=weight, + name=name, ) @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) From d2c2f7855185ec7b683cba02d0e9ce9e42db1257 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 17:47:16 +0800 Subject: [PATCH 24/92] change backward --- paddle/framework/backward.cc | 26 ++++++++++---------- paddle/framework/backward_test.cc | 40 +++++++++++++++---------------- paddle/framework/operator.h | 1 + 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 315bdde76d..a82dc4ef4b 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -22,7 +22,7 @@ namespace paddle { namespace framework { template -static void ForEachVarName(Map& names, T callback) { +static void ForEachVarName(const Map& names, T callback) { for (auto& name : names) { for (auto& n : name.second) { if (callback(n)) return; @@ -43,7 +43,7 @@ static bool AllInSet( static std::shared_ptr NOP() { auto net_op = std::make_shared(); - net_op->type_ = "@NOP@"; + net_op->SetType("@NOP@"); net_op->CompleteAddOp(); return net_op; } @@ -69,15 +69,15 @@ std::shared_ptr BackwardRecursive( // If all input gradients of forwarding operator do not need to calculate, // just return an NOP. Not return null ptr because NOP does not take // too much time for calculation, but it is useful for simplifying logic. - if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) { + if (AllInSet(forwardOp.Inputs(), kGradVarSuffix, no_grad_names)) { return NOP(); } // All output gradients of forwarding operator do not need to calculate. // Then all input gradients cannot be computed at all, and we put them into // `no_grad_names` set. Return an NOP. 
- if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) { - ForEachVarName(forwardOp.inputs_, + if (AllInSet(forwardOp.Outputs(), kGradVarSuffix, no_grad_names)) { + ForEachVarName(forwardOp.Inputs(), [&no_grad_names](const std::string& name) -> bool { no_grad_names.insert(GradVarName(name)); return false; @@ -103,7 +103,7 @@ std::shared_ptr BackwardRecursive( auto fwd = *it; auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id); net->AddOp(bwd); - ForEachVarName(bwd->outputs_, + ForEachVarName(bwd->Outputs(), [&dup_output_ops, local_op_id](const std::string& out) { dup_output_ops[out].emplace_back(local_op_id); return false; @@ -144,13 +144,13 @@ std::shared_ptr BackwardRecursive( } else { std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); - ForEachVarName(grad_op->inputs_, [&no_grad_names, - &net](std::string& grad_input) { + ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, + grad_op](const std::string& grad_input) { if (no_grad_names.count(grad_input)) { // +1 for \0 std::string prefix = grad_input.substr( 0, grad_input.size() - sizeof(kGradVarSuffix) / sizeof(char) + 1); - grad_input = prefix + kZeroVarSuffix; + grad_op->Rename(grad_input, prefix + kZeroVarSuffix); // If part of input gradient of that operator is not calculated, fill // zero variables to that input gradient. @@ -160,10 +160,10 @@ std::shared_ptr BackwardRecursive( return false; }); - ForEachVarName(grad_op->outputs_, - [&no_grad_names](std::string& grad_output) { + ForEachVarName(grad_op->Outputs(), + [&no_grad_names, &grad_op](const std::string& grad_output) { if (no_grad_names.count(grad_output)) { - grad_output = kEmptyVarName; + grad_op->Rename(grad_output, kEmptyVarName); } return false; }); @@ -173,7 +173,7 @@ std::shared_ptr BackwardRecursive( } net->AddOp(grad_op); } - net->type_ = "@GENERATED_BACKWARD@"; + net->SetType("@GENERATED_BACKWARD@"); net->CompleteAddOp(); return net; } diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index e1e5379009..5874ef2f1f 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -173,8 +173,8 @@ TEST(Backward, simple_op_grad) { "rowwise_add", {{"X", {"x"}}, {"b", {"b"}}}, {{"Out", {"out"}}}, {}); ASSERT_NE(fwd, nullptr); auto gop = f::OpRegistry::CreateGradOp(*fwd); - ASSERT_EQ(1UL, gop->inputs_.size()); - ASSERT_EQ("rowwise_add_grad", gop->type_); + ASSERT_EQ(1UL, gop->Inputs().size()); + ASSERT_EQ("rowwise_add_grad", gop->Type()); ASSERT_EQ(f::GradVarName("x"), gop->Output(f::GradVarName("X"))); ASSERT_EQ(f::GradVarName("b"), gop->Output(f::GradVarName("b"))); } @@ -210,13 +210,13 @@ TEST(Backward, net_fc_backward_normal) { ASSERT_EQ(3UL, net->ops_.size()); f::OperatorBase &d_sigmoid = *net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.type_); + ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); f::OperatorBase &d_add = *net->ops_[1]; - ASSERT_EQ("rowwise_add_grad", d_add.type_); + ASSERT_EQ("rowwise_add_grad", d_add.Type()); f::OperatorBase &d_mul = *net->ops_[2]; - ASSERT_EQ("mul_grad", d_mul.type_); + ASSERT_EQ("mul_grad", d_mul.Type()); } TEST(Backward, net_fc_backward_not_have_b) { @@ -236,10 +236,10 @@ TEST(Backward, net_fc_backward_not_have_b) { ASSERT_EQ(2UL, net->ops_.size()); f::OperatorBase &d_sigmoid = *net->ops_[0]; - ASSERT_EQ("sigmoid_grad", d_sigmoid.type_); + ASSERT_EQ("sigmoid_grad", d_sigmoid.Type()); f::OperatorBase &d_mul = *net->ops_[1]; - ASSERT_EQ("mul_grad", d_mul.type_); + ASSERT_EQ("mul_grad", d_mul.Type()); } TEST(Backward, 
net_input_of_network_not_need_grad) { @@ -293,7 +293,7 @@ TEST(Backward, net_shared_weight) { ASSERT_TRUE(bwd->IsNetOp()); auto bwd_net = static_cast(bwd.get()); ASSERT_EQ(3UL, bwd_net->ops_.size()); - ASSERT_EQ("add", bwd_net->ops_[2]->type_); + ASSERT_EQ("add", bwd_net->ops_[2]->Type()); } TEST(Backward, op_register_grad_not_for_network) { @@ -334,15 +334,15 @@ TEST(Backward, op_part_of_output_are_not_need) { ASSERT_EQ(net->ops_.size(), 2UL); auto &fill_zero = *net->ops_[0]; - ASSERT_EQ("fill_zeros_like", fill_zero.type_); + ASSERT_EQ("fill_zeros_like", fill_zero.Type()); ASSERT_EQ(1UL, fill_zero.Inputs("Src").size()); ASSERT_EQ("Z", fill_zero.Input("Src")); ASSERT_EQ(1UL, fill_zero.Outputs("Dst").size()); ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Dst")); auto &d_many_out = *net->ops_[1]; - ASSERT_EQ("many_output_op_grad", d_many_out.type_); - ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.inputs_.size()); // I/O/OG + ASSERT_EQ("many_output_op_grad", d_many_out.Type()); + ASSERT_EQ(1UL + 2UL + 2UL, d_many_out.Inputs().size()); // I/O/OG ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, d_many_out.Input(f::GradVarName("z"))); ASSERT_EQ(f::GradVarName("Y"), d_many_out.Input(f::GradVarName("y"))); @@ -354,9 +354,9 @@ TEST(Backward, op_part_of_input_are_not_need) { {{"Out", {"out"}}}, {}); auto backward = f::Backward(*fwd, {"a"}); auto &grad_mul = *backward; - ASSERT_EQ(grad_mul.type_, "mul_grad"); - ASSERT_EQ(grad_mul.inputs_.size(), 2UL + 1UL + 1UL); - ASSERT_EQ(grad_mul.outputs_.size(), 2UL); + ASSERT_EQ(grad_mul.Type(), "mul_grad"); + ASSERT_EQ(grad_mul.Inputs().size(), 2UL + 1UL + 1UL); + ASSERT_EQ(grad_mul.Outputs().size(), 2UL); ASSERT_EQ(grad_mul.Output(f::GradVarName("X")), f::kEmptyVarName); ASSERT_EQ(grad_mul.Output(f::GradVarName("Y")), f::GradVarName("b")); ASSERT_EQ(grad_mul.Input(f::GradVarName("Out")), f::GradVarName("out")); @@ -394,18 +394,18 @@ TEST(Backward, linear_net_intermediate_variable_has_no_grad) { auto &grad_fc = *bwd_net->ops_[0]; const char *all = paddle::operators::NetOp::kAll; - EXPECT_EQ(grad_fc.inputs_[all].size(), + EXPECT_EQ(grad_fc.Inputs(all).size(), 2UL /* external input number */ + 1UL /* external output number*/ + 1UL /* number of gradient of external output*/ + 2U /* internal variable number*/); - EXPECT_EQ(grad_fc.outputs_[all].size(), + EXPECT_EQ(grad_fc.Outputs(all).size(), 2UL /* input number of mul*/ + 2UL /* input number of rowwise_add */ + 1UL /* input number of sigmod */); - EXPECT_EQ(bwd_net->ops_[1]->inputs_[all].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[1]->outputs_[all].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->inputs_[all].size(), 0UL); - EXPECT_EQ(bwd_net->ops_[2]->outputs_[all].size(), 0UL); + EXPECT_EQ(bwd_net->ops_[1]->Inputs(all).size(), 0UL); + EXPECT_EQ(bwd_net->ops_[1]->Outputs(all).size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->Inputs(all).size(), 0UL); + EXPECT_EQ(bwd_net->ops_[2]->Outputs(all).size(), 0UL); } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 038e6fe7a2..acff4f0ca0 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -121,6 +121,7 @@ class OperatorBase { virtual std::vector OutputVars(bool has_intermediate) const; const std::string& Type() const { return type_; } + void SetType(const std::string& type) { type_ = type; } const AttributeMap& Attrs() const { return attrs_; } protected: From 957aa691b49037bcf245f848706e85ac2649c56a Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Mon, 14 Aug 2017 18:47:38 +0800 Subject: [PATCH 25/92] Fix 
pnpair_evaluator. 1. Append info into input after label and before weight. --- .../trainer_config_helpers/evaluators.py | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/python/paddle/trainer_config_helpers/evaluators.py b/python/paddle/trainer_config_helpers/evaluators.py index 72cfbecf6d..57979db4de 100644 --- a/python/paddle/trainer_config_helpers/evaluators.py +++ b/python/paddle/trainer_config_helpers/evaluators.py @@ -297,7 +297,8 @@ def auc_evaluator( def pnpair_evaluator( input, label, - weight, + info, + weight=None, name=None, ): """ Positive-negative pair rate Evaluator which adapts to rank task like @@ -307,22 +308,29 @@ def pnpair_evaluator( .. code-block:: python - eval = pnpair_evaluator(input, label, weight) + eval = pnpair_evaluator(input, label, info) :param input: Input Layer name. The output prediction of network. :type input: LayerOutput :param label: Label layer name. :type label: LayerOutput + :param info: Info layer name. (TODO, explaination) + :type info: LayerOutput :param weight: Weight Layer name. It should be a matrix with size [sample_num, 1]. (TODO, explaination) :type weight: LayerOutput :param name: Evaluator name. :type name: None|basestring """ + if not isinstance(input, list): + input = [input] + if label: + input.append(label) + if info: + input.append(info) evaluator_base( input=input, type="pnpair", - label=label, weight=weight, name=name, ) @@ -425,12 +433,12 @@ def chunk_evaluator( .. code-block:: text - Scheme Description + Scheme Description plain Use the same label for the whole chunk. - IOB Two labels for chunk type X, B-X for chunk begining and I-X for chunk inside. + IOB Two labels for chunk type X, B-X for chunk begining and I-X for chunk inside. IOE Two labels for chunk type X, E-X for chunk ending and I-X for chunk inside. - IOBES Four labels for chunk type X, B-X for chunk begining, I-X for chunk inside, E-X for chunk end and S-X for single word chunk. - + IOBES Four labels for chunk type X, B-X for chunk begining, I-X for chunk inside, E-X for chunk end and S-X for single word chunk. + To make it clear, let's illustrate by an NER example. Assuming that there are three named entity types including ORG, PER and LOC which are called 'chunk type' here, if 'IOB' scheme were used, the label set will be extended to a set including B-ORG, I-ORG, B-PER, I-PER, B-LOC, I-LOC and O, @@ -447,7 +455,7 @@ def chunk_evaluator( tagType = label % numTagType chunkType = label / numTagType otherChunkType = numChunkTypes - + The following table shows the mapping rule between tagType and tag type in each scheme. .. code-block:: text @@ -471,7 +479,7 @@ def chunk_evaluator( O 6 In this example, chunkType has three values: 0 for ORG, 1 for PER, 2 for LOC, because the scheme is - "IOB" so tagType has two values: 0 for B and 1 for I. + "IOB" so tagType has two values: 0 for B and 1 for I. Here we will use I-LOC to explain the above mapping rules in detail. For I-LOC, the label id is 5, so we can get tagType=1 and chunkType=2, which means I-LOC is a part of NER chunk LOC and the tag is I. @@ -482,7 +490,7 @@ def chunk_evaluator( eval = chunk_evaluator(input, label, chunk_scheme, num_chunk_types) - + :param input: The input layers. :type input: LayerOutput :param label: An input layer containing the ground truth label. 
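Note on the pnpair_evaluator change above: after this fix the evaluator appends the label and info layers to its input list internally, so a ranking config only passes the layers in. A minimal sketch of a call site, assuming `score`, `lbl` and `query_id` are LayerOutput objects defined elsewhere in the config (these names are illustrative, not part of the patch):

    from paddle.trainer_config_helpers.evaluators import pnpair_evaluator

    # positive-negative pair rate over ranking scores, grouped by query_id
    pnpair_evaluator(input=score, label=lbl, info=query_id)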
From 991c4d807959fc1fc9e54d17f545fd46e0226bbf Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Mon, 14 Aug 2017 19:04:38 +0800 Subject: [PATCH 26/92] add some doc to backward (#3474) --- paddle/framework/backward.cc | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 855e2cae20..2118c9d5d4 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -30,6 +30,7 @@ static void ForEachVarName(Map& names, T callback) { } } +// return whether all the names + suffixes in the set static bool AllInSet( const std::map>& names, const std::string& suffix, const std::unordered_set& set) { @@ -48,7 +49,7 @@ static std::shared_ptr NOP() { return net_op; } -// Get backward operator from a forward operator, recursively implementation. +// Get backward operator from a forward operator, a recursive implementation. // // no_grad_names the gradient variable names without gradient calculating. // @@ -56,27 +57,30 @@ static std::shared_ptr NOP() { // BackwardRecursive. use `uid = uniq_id++;` to get the unique index, and // pass `uniq_id` through recursive calling. // -// returns The backward operator. For simple situation, it is a simple -// operator. For complex situation, it is a NetOp. +// returns The backward operator. In a simple situation, it may be a simple +// operator, in a complex situation, it maybe a NetOp. // // See Backward.h for details static std::shared_ptr BackwardRecursive( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id); + std::shared_ptr BackwardRecursive( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { // If all input gradients of forwarding operator do not need to calculate, // just return an NOP. Not return null ptr because NOP does not take - // too much time for calculation, but it is useful for simplifying logic. - if (AllInSet(forwardOp.inputs_, kGradVarSuffix, no_grad_names)) { + // much time for calculation, but it is useful for simplifying logic. + if (AllInSet(forwardOp.inputs_ /*names*/, kGradVarSuffix /*suffix*/, + no_grad_names /*set*/)) { return NOP(); } // All output gradients of forwarding operator do not need to calculate. // Then all input gradients cannot be computed at all, and we put them into // `no_grad_names` set. Return an NOP. - if (AllInSet(forwardOp.outputs_, kGradVarSuffix, no_grad_names)) { + if (AllInSet(forwardOp.outputs_ /*names*/, kGradVarSuffix /*suffix*/, + no_grad_names /*set*/)) { ForEachVarName(forwardOp.inputs_, [&no_grad_names](const std::string& name) -> bool { no_grad_names.insert(GradVarName(name)); @@ -93,11 +97,11 @@ std::shared_ptr BackwardRecursive( auto& forwardNet = static_cast(forwardOp); // Map from output gradient variable name to operator's indices in - // backward net. That operator generates that variable. + // backward net's ops_. That operator generates that variable. std::unordered_map> dup_output_ops; size_t local_op_id = 0; - // reversely travel forwardNet + // reversely travel forwardNet and collect all duplicate outputs. for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); ++it, ++local_op_id) { auto fwd = *it; @@ -112,25 +116,35 @@ std::shared_ptr BackwardRecursive( // Get unique ID for this method. 
auto uid = uniq_id++; // TODO(dzh): more comment + // multiple operators which have the same output (y for example) may + // overwrite the same y variable when backward, special operations are token + // to handle this case. For each duplicate output, rename it to an alias + // (original name with a offset), append an `add` op for its operator, + // and finally sum all the alias variable to the final output variable y. using Pos = std::pair>; std::list insert_position; for (auto& dup_output_op : dup_output_ops) { const std::string& name = dup_output_op.first; auto& dup_op = dup_output_op.second; + // no duplicate output if (dup_op.size() == 1) continue; - std::vector dup_outputs; + // process the duplicate outputs + std::vector dup_outputs; for (size_t i = 0; i < dup_op.size(); ++i) { + // rename each duplicate output to an alias auto op_offset = dup_op[i]; dup_outputs.push_back(name + "@RENAME@" + std::to_string(uid) + "@" + std::to_string(i)); net->ops_[op_offset]->Rename(name, dup_outputs.back()); } + // collect all the offset to append `add` op for each alias insert_position.push_back( {dup_op.back(), OpRegistry::CreateOp("add", {{"X", {dup_outputs}}}, {{"Out", {name}}}, {})}); } + // make sure the inserted `add` ops follow the BFS order. insert_position.sort( [](const Pos& l, const Pos& r) { return l.first > r.first; }); From 0c96c99746ddc8abf44dbada694715c9caad0bcd Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 19:27:36 +0800 Subject: [PATCH 27/92] change pybind and net_op_test --- paddle/framework/grad_op_builder.cc | 11 +++++------ paddle/framework/grad_op_builder_test.cc | 12 ++++++------ paddle/framework/pybind.cc | 8 ++++---- paddle/operators/net_op_test.cc | 4 ++-- paddle/operators/recurrent_op.cc | 6 +++--- 5 files changed, 20 insertions(+), 21 deletions(-) diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index 1833a5463a..f9b1a37c99 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -23,7 +23,7 @@ static void TransOpArg(const OperatorBase* src_op, OperatorBase::VarNameMap* vars, const OpArgType& src_type, bool is_grad) { const auto& src_inout = - src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_; + src_type == OpArgType::IN ? 
src_op->Inputs() : src_op->Outputs(); auto& dst_inout = *vars; const OpProto& proto = OpProtos().at(src_op->Type()); @@ -39,13 +39,12 @@ static void TransOpArg(const OperatorBase* src_op, dst_inout[dst_name].emplace_back(s); } } - return dst_inout; } OperatorBase* BuildGradOp(const OperatorBase* op) { - auto gop_type_it = OpRegistry::grad_ops().find(op->type_); + auto gop_type_it = OpRegistry::grad_ops().find(op->Type()); PADDLE_ENFORCE(gop_type_it != OpRegistry::grad_ops().end(), - "Operator %s do not register gradient type", op->type_); + "Operator %s do not register gradient type", op->Type()); auto& grad_op_type = gop_type_it->second; OperatorBase::VarNameMap inputs; OperatorBase::VarNameMap outputs; @@ -56,9 +55,9 @@ OperatorBase* BuildGradOp(const OperatorBase* op) { auto gop_it = OpRegistry::op_creators().find(grad_op_type); PADDLE_ENFORCE(gop_it != OpRegistry::op_creators().end(), "Operator %s 's Gradient %s's creator cannot be found", - op->type_, grad_op_type); + op->Type(), grad_op_type); - return gop_it->second(grad_op_type, inputs, outputs, op->attrs_); + return gop_it->second(grad_op_type, inputs, outputs, op->Attrs()); } } // namespace framework diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index ebaf84545f..ff1473d327 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -52,8 +52,8 @@ TEST(GradOpBuilder, AddTwo) { "add_two", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {})); std::shared_ptr grad_add_op = f::OpRegistry::CreateGradOp(*add_op); - EXPECT_EQ(grad_add_op->inputs_.size(), 4UL); - EXPECT_EQ(grad_add_op->outputs_.size(), 2UL); + EXPECT_EQ(grad_add_op->Inputs().size(), 4UL); + EXPECT_EQ(grad_add_op->Outputs().size(), 2UL); EXPECT_EQ(grad_add_op->Input("X"), "x"); EXPECT_EQ(grad_add_op->Input("Y"), "y"); EXPECT_EQ(grad_add_op->Input("Out"), "out"); @@ -76,7 +76,7 @@ TEST(GradOpBuilder, MutiInOut) { std::shared_ptr grad_test_op = f::OpRegistry::CreateGradOp(*test_op); - ASSERT_EQ(grad_test_op->inputs_.size(), 3UL + 2UL + 2UL); + ASSERT_EQ(grad_test_op->Inputs().size(), 3UL + 2UL + 2UL); EXPECT_EQ(grad_test_op->Input("In1"), "in1"); EXPECT_EQ(grad_test_op->Inputs("In2_mult"), std::vector({"in2_1", "in2_2", "in2_3"})); @@ -90,7 +90,7 @@ TEST(GradOpBuilder, MutiInOut) { std::vector( {f::GradVarName("out2_1"), f::GradVarName("out2_2")})); - ASSERT_EQ(grad_test_op->outputs_.size(), 3UL); + ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), std::vector({f::GradVarName("in2_1"), @@ -109,7 +109,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) { f::OpRegistry::CreateGradOp(*test_op); // 'In2' and 'Out2' are ignored in gradient calculating - ASSERT_EQ(grad_test_op->inputs_.size(), 2UL + 1UL + 2UL); + ASSERT_EQ(grad_test_op->Inputs().size(), 2UL + 1UL + 2UL); EXPECT_EQ(grad_test_op->Input("In1"), "in1"); EXPECT_EQ(grad_test_op->Inputs("In3_mult"), std::vector({"in3_1", "in3_2"})); @@ -121,7 +121,7 @@ TEST(GradOpBuilder, IOIgnoredInGradient) { EXPECT_EQ(grad_test_op->Input(f::GradVarName("Out2")), f::GradVarName("out2")); - ASSERT_EQ(grad_test_op->outputs_.size(), 3UL); + ASSERT_EQ(grad_test_op->Outputs().size(), 3UL); EXPECT_EQ(grad_test_op->Output(f::GradVarName("In1")), f::GradVarName("in1")); EXPECT_EQ(grad_test_op->Outputs(f::GradVarName("In2_mult")), std::vector( diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index 
07b42c8371..e599b5daa0 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -53,15 +53,15 @@ void ExposeOperator(ClassType &m) { .def("run", &ClassType::type::Run) .def("type", [](const typename ClassType::type &op) -> std::string { - return op.type_; + return op.Type(); }) .def("outputs", [](const typename ClassType::type &op) -> std::map> { - return op.outputs_; + return op.Outputs(); }) .def("inputs", - [](const typename ClassType::type &op) { return op.inputs_; }) + [](const typename ClassType::type &op) { return op.Inputs(); }) .def("__str__", &ClassType::type::DebugString) .def("no_intermediate_outputs", [](const typename ClassType::type &op) { @@ -229,7 +229,7 @@ All parameter, weight, gradient are variables in Paddle. net.def_static("create", []() -> std::shared_ptr { auto retv = std::make_shared(); - retv->type_ = "plain_net"; + retv->SetType("plain_net"); return retv; }) .def("add_op", &operators::NetOp::AddOp) diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index f7aa56262e..0acde5a90d 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -56,8 +56,8 @@ TEST(OpKernel, all) { net->CompleteAddOp(); AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, - net->inputs_.at(NetOp::kAll)); - AssertSameVectorWithoutOrder({"y", "z"}, net->outputs_.at(NetOp::kAll)); + net->Inputs(NetOp::kAll)); + AssertSameVectorWithoutOrder({"y", "z"}, net->Outputs(NetOp::kAll)); auto final_outs = net->OutputVars(false); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 5ddee75581..d81cc89ae3 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -82,14 +82,14 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope", arg_->step_net); auto net_op = net_var->GetMutable(); - PADDLE_ENFORCE(!net_op->outputs_.empty(), "net_op has no outputs"); + PADDLE_ENFORCE(!net_op->Outputs().empty(), "net_op has no outputs"); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { auto& step_scope = scope.NewScope(); // create step net's temp inputs - for (auto& input : net_op->inputs_) { + for (auto& input : net_op->Inputs()) { // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { @@ -98,7 +98,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { } } // create stepnet's outputs - for (const auto& output : net_op->outputs_) { + for (const auto& output : net_op->Outputs()) { for (auto& var_name : output.second) { step_scope.NewVar(var_name); } From a240bce1529df0ad227cdc8eddd918630fcf26dc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 19:46:19 +0800 Subject: [PATCH 28/92] fix backward --- paddle/framework/backward.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 1fddad8d16..ab91e422ee 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -79,9 +79,9 @@ std::shared_ptr BackwardRecursive( // All output gradients of forwarding operator do not need to calculate. // Then all input gradients cannot be computed at all, and we put them into // `no_grad_names` set. Return an NOP. 
- if (AllInSet(forwardOp.Output() /*names*/, kGradVarSuffix /*suffix*/, + if (AllInSet(forwardOp.Outputs() /*names*/, kGradVarSuffix /*suffix*/, no_grad_names /*set*/)) { - ForEachVarName(forwardOp.inputs_, + ForEachVarName(forwardOp.Inputs(), [&no_grad_names](const std::string& name) -> bool { no_grad_names.insert(GradVarName(name)); return false; From e9e6abfa64cd7395c9bc0a934b0fe47853fb8437 Mon Sep 17 00:00:00 2001 From: dong zhihong Date: Mon, 14 Aug 2017 12:10:36 +0000 Subject: [PATCH 29/92] remove comments --- Dockerfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 885bec9ba0..da00471025 100644 --- a/Dockerfile +++ b/Dockerfile @@ -63,8 +63,6 @@ RUN pip install pre-commit 'ipython==5.3.0' && \ pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ pip install opencv-python -# paddle is using numpy.flip, which is introduced since 1.12.0 -# RUN pip --no-cache-dir install 'numpy>=1.12.0' COPY ./python/requirements.txt /root/ RUN pip install -r /root/requirements.txt From 252d41655ac1685dad796c689fb12f9bafb62ae3 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 14 Aug 2017 20:17:48 +0800 Subject: [PATCH 30/92] clean code --- paddle/framework/op_registry.h | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index af965df7ec..a8c960effe 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -162,11 +162,8 @@ class OpRegistry { auto op_create_it = op_creators().find(type); PADDLE_ENFORCE(op_create_it != op_creators().end(), "Operator %s cannot be found.", type); - - auto attrMap = attrs; - op_checkers().at(type).Check(attrMap); - auto op = op_create_it->second(type, inputs, outputs, attrMap); - GenerateTempVariableName(op); + op_checkers().at(type).Check(attrs); + auto op = op_create_it->second(type, inputs, outputs, attrs); return std::shared_ptr(op); } @@ -217,21 +214,6 @@ class OpRegistry { static std::unordered_map op_checkers_; return op_checkers_; } - - static void GenerateTempVariableName(OperatorBase* op) { - static std::atomic gUniqId(0UL); - for (auto& output : op->Outputs()) { - for (auto& output_name : output.second) { - if (output_name == kTempVarName) { - auto new_name = output_name; - new_name += op->Type(); - new_name += "@"; - new_name += std::to_string(gUniqId.fetch_add(1)); - op->Rename(output_name, new_name); - } - } - } - } }; class Registrar { From 84d6434d53dbef47b5aa817c5ff25d236a59a83c Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 14 Aug 2017 20:58:57 +0800 Subject: [PATCH 31/92] Compare the gradient consistency between GPU and CPU calculations. 
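This patch gives GradientChecker a get_grad/compare_grad pair that runs the generated backward op on both CPUPlace and GPUPlace and asserts the two results agree, while check_grad keeps comparing analytical gradients against get_numeric_gradient. A minimal usage sketch, assuming the gradient_checker test module from this patch is importable; the test class name and tensor shape are illustrative only:

    import numpy as np
    from gradient_checker import GradientChecker, create_op

    class ExampleSigmoidGradCheck(GradientChecker):
        def test_sigmoid_grad(self):
            op = create_op("sigmoid")
            inputs = {"X": np.random.random((8, 5)).astype("float32")}
            # CPU/GPU consistency of the backward op
            # (compare_grad returns early when no GPU kernel is built)
            self.compare_grad(op, inputs)
            # numerical vs. analytical gradient of "Y" w.r.t. "X"
            self.check_grad(op, inputs, set("X"), "Y")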
--- paddle/operators/sigmoid_op.cc | 3 +- .../paddle/v2/framework/tests/CMakeLists.txt | 1 + .../v2/framework/tests/gradient_checker.py | 173 ++++++++---------- .../v2/framework/tests/test_sigmoid_op.py | 22 ++- 4 files changed, 98 insertions(+), 101 deletions(-) diff --git a/paddle/operators/sigmoid_op.cc b/paddle/operators/sigmoid_op.cc index a7dfb624e5..84601bd733 100644 --- a/paddle/operators/sigmoid_op.cc +++ b/paddle/operators/sigmoid_op.cc @@ -44,7 +44,8 @@ class SigmoidOpGrad : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output(0)->Resize(ctx.Input(0)->dims()); + ctx.Output(framework::GradVarName("X")) + ->Resize(ctx.Input("Y")->dims()); } }; diff --git a/python/paddle/v2/framework/tests/CMakeLists.txt b/python/paddle/v2/framework/tests/CMakeLists.txt index 96fad9b42e..4c088e7612 100644 --- a/python/paddle/v2/framework/tests/CMakeLists.txt +++ b/python/paddle/v2/framework/tests/CMakeLists.txt @@ -25,3 +25,4 @@ py_test(test_operator SRCS test_operator.py) # py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py) py_test(test_uniform_random_op SRCS test_uniform_random_op.py) py_test(test_recurrent_op SRCS test_recurrent_op.py) +py_test(test_gradient_checker SRCS test_gradient_checker.py) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 501cf6110f..5f9e54837e 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -1,6 +1,7 @@ import unittest import numpy +import itertools import paddle.v2.framework.core as core from paddle.v2.framework.op import Operator @@ -8,6 +9,7 @@ __all__ = ['get_numeric_gradient'] def create_op(op_type): + # TODO need to set attrs kwargs = dict() for in_name in Operator.get_op_input_names(op_type): kwargs[in_name] = in_name @@ -66,7 +68,6 @@ def get_numeric_gradient(op, local_scope.find_var(output).get_tensor().alloc_float(core.CPUPlace( )) - # TODO(yuyang18): Only CPU is support now. 
cpu_ctx = core.DeviceContext.create(core.CPUPlace()) def get_output(): @@ -109,12 +110,71 @@ def get_numeric_gradient(op, class GradientChecker(unittest.TestCase): - def assert_is_close(self, numeric_grads, scope, max_relative_error, - msg_prefix): - for name in numeric_grads: - b = numpy.array(scope.find_var(grad_var_name(name)).get_tensor()) - a = numeric_grads[name] + def get_grad(self, forward_op, backward_op, input_vars, grad_names, place): + scope = core.Scope() + ctx = core.DeviceContext.create(place) + inputs = forward_op.inputs() + in_names = [item for k in inputs for item in inputs[k]] + outputs = forward_op.outputs() + out_names = [item for k in outputs for item in outputs[k]] + + # create input var and set value + for name, value in input_vars.iteritems(): + if name not in in_names: + raise ValueError(name + "does not exist in Op's inputs.") + var = scope.new_var(name).get_tensor() + var.set_dims(value.shape) + var.set(value, place) + + # run forward op + for out_name in out_names: + scope.new_var(out_name) + forward_op.infer_shape(scope) + forward_op.run(scope, ctx) + + # set output var's shape + # set output grad to ones + for name in out_names: + out_tensor = scope.find_var(name).get_tensor() + grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() + grad_tensor.set_dims(out_tensor.shape()) + data = numpy.ones(out_tensor.shape(), dtype=numpy.float32) + grad_tensor.set(data, place) + + # run backward op + for name in backward_op.outputs(): + scope.new_var(name) + backward_op.infer_shape(scope) + backward_op.run(scope, ctx) + + outs = [ + numpy.array(scope.find_var(name).get_tensor()) + for name in grad_names + ] + return outs + + def compare_grad(self, forward_op, inputs): + backward_op = core.Operator.backward(forward_op, set()) + if not (core.is_compile_gpu() and backward_op.support_gpu()): + return + + outputs = backward_op.outputs() + out_names = [item for k in outputs for item in outputs[k]] + cpu_grads = self.get_grad(forward_op, backward_op, inputs, out_names, + core.CPUPlace()) + gpu_grads = self.get_grad(forward_op, backward_op, inputs, out_names, + core.GPUPlace(0)) + + for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, + out_names): + self.assertTrue( + numpy.allclose(c_grad, g_grad), + "output name: " + name + " has diff") + + def assert_is_close(self, numeric_grads, analytic_grads, names, + max_relative_error, msg_prefix): + for a, b, name in itertools.izip(numeric_grads, analytic_grads, names): abs_a = numpy.abs(a) # if abs_a is nearly zero, then use abs error for a, not relative # error. 
@@ -159,106 +219,27 @@ class GradientChecker(unittest.TestCase): inputs = forward_op.inputs() in_names = [item for k in inputs for item in inputs[k]] - outputs = forward_op.outputs() - out_names = [item for k in outputs for item in outputs[k]] - for no_grad in no_grad_set: if no_grad not in in_names: raise ValueError("no_grad should be in in_names") backward_op = core.Operator.backward(forward_op, no_grad_set) - bwd_outputs = backward_op.outputs() - bwd_out_names = [item for k in bwd_outputs for item in bwd_outputs[k]] - places = [core.CPUPlace()] if not only_cpu and core.is_compile_gpu() and backward_op.support_gpu(): places.append(core.GPUPlace(0)) - numeric_grad = dict() - # get numeric gradient - for check_name in inputs_to_check: - numeric_grad[check_name] = \ - get_numeric_gradient(forward_op, input_vars, output_name, - check_name) + # get numerical gradients + numeric_grads = [ + get_numeric_gradient(forward_op, input_vars, output_name, name) + for name in inputs_to_check + ] - # get operator gradient according to different device + check_names = [grad_var_name(name) for name in inputs_to_check] for place in places: - scope = core.Scope() - ctx = core.DeviceContext.create(place) - - # create input var and set value - for name, value in input_vars.iteritems(): - if name not in in_names: - raise ValueError(name + " not in op.inputs_") - var = scope.new_var(name).get_tensor() - var.set_dims(value.shape) - var.set(value, place) - - # create output var - for out_name in out_names: - scope.new_var(out_name).get_tensor() - - # infer the shape of output var and compute/set value of output var - forward_op.infer_shape(scope) - forward_op.run(scope, ctx) - - # create output grad var - # set shape as the output var - # set value of this grad to ones - for name in out_names: - out_tensor = scope.find_var(name).get_tensor() - grad_tensor = scope.new_var(grad_var_name(name)).get_tensor() - grad_tensor.set_dims(out_tensor.shape()) - data = 1.0 * numpy.ones(out_tensor.shape()) - grad_tensor.set(data, place) - - # create input grad var - for name in bwd_out_names: - scope.new_var(name).get_tensor() - - # infer the shape of input gradient var and compute/set it's value - # with backward op - backward_op.infer_shape(scope) - backward_op.run(scope, ctx) - - self.assert_is_close(numeric_grad, scope, max_relative_error, + # get analytical gradients according to different device + analytic_grads = self.get_grad(forward_op, backward_op, input_vars, + check_grad_names, place) + self.assert_is_close(numeric_grads, analytic_grads, check_names, + max_relative_error, "Gradient Check On %s" % str(place)) - - -if __name__ == '__main__': - - class GetNumericGradientTest(unittest.TestCase): - def test_add_op(self): - add_op = Operator('add_two', X="X", Y="Y", Out="Z") - x = numpy.random.random((10, 1)).astype("float32") - y = numpy.random.random((10, 1)).astype("float32") - - arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X') - self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-2) - - def test_softmax_op(self): - def stable_softmax(x): - """Compute the softmax of vector x in a numerically stable way.""" - shiftx = x - numpy.max(x) - exps = numpy.exp(shiftx) - return exps / numpy.sum(exps) - - def label_softmax_grad(Y, dY): - dX = Y * 0.0 - for i in range(Y.shape[0]): - d = numpy.dot(Y[i, :], dY[i, :]) - dX[i, :] = Y[i, :] * (dY[i, :] - d) - return dX - - softmax_op = Operator("softmax", X="X", Y="Y") - - X = numpy.random.random((2, 2)).astype("float32") - Y = numpy.apply_along_axis(stable_softmax, 1, X) - 
dY = numpy.ones(Y.shape) - dX = label_softmax_grad(Y, dY) - - arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X') - numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2) - - unittest.main() diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index 2a57a41ed8..1a6d395be6 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -1,6 +1,7 @@ import unittest -from op_test_util import OpTestMeta import numpy as np +from op_test_util import OpTestMeta +from gradient_checker import GradientChecker, create_op class TestSigmoidOp(unittest.TestCase): @@ -8,12 +9,25 @@ class TestSigmoidOp(unittest.TestCase): def setUp(self): self.type = "sigmoid" - self.inputs = {'X': np.random.random((32, 100)).astype("float32")} + self.inputs = {'X': np.random.random((15, 31)).astype("float32")} self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))} -#class TestSigmoidGradOp(unittest.TestCase): -#TODO(qingqing) add unit test +class TestSigmoidGradOp(GradientChecker): + def test_compare_grad(self): + op = create_op("sigmoid") + inputs = {"X": np.random.random((11, 17)).astype("float32")} + + # compare gpu and cpu results for backward op + self.compare_grad(op, inputs) + + def test_check_grad(self): + op = create_op("sigmoid") + inputs = {"X": np.random.random((11, 17)).astype("float32")} + + # check gradients + self.check_grad(op, inputs, set("X"), "Y") + if __name__ == '__main__': unittest.main() From 01d9134067852a1f9dfecf75f730f9fba14434e0 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Mon, 14 Aug 2017 21:01:24 +0800 Subject: [PATCH 32/92] Add test_gradient_checker.py --- .../framework/tests/test_gradient_checker.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 python/paddle/v2/framework/tests/test_gradient_checker.py diff --git a/python/paddle/v2/framework/tests/test_gradient_checker.py b/python/paddle/v2/framework/tests/test_gradient_checker.py new file mode 100644 index 0000000000..e0b3151208 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_gradient_checker.py @@ -0,0 +1,43 @@ +import unittest +import numpy +from paddle.v2.framework.op import Operator +from gradient_checker import GradientChecker +from gradient_checker import get_numeric_gradient + + +class GetNumericGradientTest(unittest.TestCase): + def test_add_op(self): + add_op = Operator('add_two', X="X", Y="Y", Out="Z") + x = numpy.random.random((10, 1)).astype("float32") + y = numpy.random.random((10, 1)).astype("float32") + + arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X') + self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4) + + def test_softmax_op(self): + def stable_softmax(x): + """Compute the softmax of vector x in a numerically stable way.""" + shiftx = x - numpy.max(x) + exps = numpy.exp(shiftx) + return exps / numpy.sum(exps) + + def label_softmax_grad(Y, dY): + dX = Y * 0.0 + for i in range(Y.shape[0]): + d = numpy.dot(Y[i, :], dY[i, :]) + dX[i, :] = Y[i, :] * (dY[i, :] - d) + return dX + + softmax_op = Operator("softmax", X="X", Y="Y") + + X = numpy.random.random((2, 2)).astype("float32") + Y = numpy.apply_along_axis(stable_softmax, 1, X) + dY = numpy.ones(Y.shape) + dX = label_softmax_grad(Y, dY) + + arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X') + numpy.testing.assert_almost_equal(arr, dX, decimal=1e-2) + + +if __name__ == '__main__': + unittest.main() From 9a0eedf5d4d32e0aaa80e554f608c56e6d36a798 Mon Sep 17 00:00:00 2001 
From: dangqingqing Date: Mon, 14 Aug 2017 21:27:17 +0800 Subject: [PATCH 33/92] fix bug. --- python/paddle/v2/framework/tests/gradient_checker.py | 3 ++- python/paddle/v2/framework/tests/test_sigmoid_op.py | 11 +++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 5f9e54837e..d251f14b9d 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -156,6 +156,7 @@ class GradientChecker(unittest.TestCase): def compare_grad(self, forward_op, inputs): backward_op = core.Operator.backward(forward_op, set()) + # return if not compile with GPU or not implementing GPU kernel if not (core.is_compile_gpu() and backward_op.support_gpu()): return @@ -239,7 +240,7 @@ class GradientChecker(unittest.TestCase): for place in places: # get analytical gradients according to different device analytic_grads = self.get_grad(forward_op, backward_op, input_vars, - check_grad_names, place) + check_names, place) self.assert_is_close(numeric_grads, analytic_grads, check_names, max_relative_error, "Gradient Check On %s" % str(place)) diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index 1a6d395be6..c3bd79f5dc 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -17,15 +17,10 @@ class TestSigmoidGradOp(GradientChecker): def test_compare_grad(self): op = create_op("sigmoid") inputs = {"X": np.random.random((11, 17)).astype("float32")} - - # compare gpu and cpu results for backward op + # compare gpu and cpu results for backward op. + # skip this test if only compiling CPU version. 
self.compare_grad(op, inputs) - - def test_check_grad(self): - op = create_op("sigmoid") - inputs = {"X": np.random.random((11, 17)).astype("float32")} - - # check gradients + # check gradients self.check_grad(op, inputs, set("X"), "Y") From 9638c142fb4fcc795ffbe9839bad78868a40b897 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 14 Aug 2017 23:15:28 +0800 Subject: [PATCH 34/92] refine --- python/setup.py.in | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/python/setup.py.in b/python/setup.py.in index 36438d3573..287442e013 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -31,12 +31,7 @@ paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage', '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'] paddle_rt_lib_dir = 'local/lib' -paddle_rt_libs = [] - -mkl_shared_libs='${MKL_SHARED_LIBS}' -if mkl_shared_libs != '': - paddle_rt_libs += mkl_shared_libs.split(';') -print paddle_rt_libs +paddle_rt_libs = [] if '${MKL_SHARED_LIBS}'== '' else '${MKL_SHARED_LIBS}'.split(';') setup(name='paddlepaddle', version='${PADDLE_VERSION}', From 2be3d32711c150d9d6cdb94124a6ecaa3c7ac0fe Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 14 Aug 2017 23:33:27 +0800 Subject: [PATCH 35/92] use shared lib when mkl --- cmake/external/openblas.cmake | 11 +++++++++-- paddle/operators/math/CMakeLists.txt | 9 ++------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index db09232c0e..d47eabba44 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -73,8 +73,15 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas) SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c) FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";") -ADD_LIBRARY(cblas STATIC ${dummyfile}) -TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) + +IF(${CBLAS_PROVIDER} MATCHES MKL) + ADD_LIBRARY(cblas SHARED ${dummyfile}) + TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) + ADD_DEPENDENCIES(cblas mklml) +ELSE() + ADD_LIBRARY(cblas STATIC ${dummyfile}) + TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) +ENDIF() IF(NOT ${CBLAS_FOUND}) ADD_DEPENDENCIES(cblas extern_openblas) diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/operators/math/CMakeLists.txt index abcaf940ab..ed51d416ed 100644 --- a/paddle/operators/math/CMakeLists.txt +++ b/paddle/operators/math/CMakeLists.txt @@ -1,13 +1,8 @@ -if(WITH_MKLML) - set(BLAS_LIB mklml) -else() - set(BLAS_LIB cblas) -endif() if(WITH_GPU) - nv_library(math_function SRCS math_function.cc math_function.cu DEPS ${BLAS_LIB} device_context) + nv_library(math_function SRCS math_function.cc math_function.cu DEPS cblas device_context) else() - cc_library(math_function SRCS math_function.cc DEPS ${BLAS_LIB} device_context) + cc_library(math_function SRCS math_function.cc DEPS cblas device_context) endif() nv_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor) From edb541f2926c6ef2cd7c9b1c5d0c80f692a50697 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 14 Aug 2017 11:47:00 -0700 Subject: [PATCH 36/92] fix compile errors --- paddle/framework/grad_op_builder.cc | 5 ++++- paddle/framework/op_registry.h | 20 ++++++++++---------- paddle/framework/operator.cc | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index b316f2d535..cb491ec95f 100644 --- 
a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -25,8 +25,9 @@ static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type, const auto& src_inout = src_type == OpArgType::IN ? src_op->inputs_ : src_op->outputs_; auto& dst_inout = *vars; + const OpProto* proto = OpRegistry::op_info_map().at(src_op->type_).proto_; const auto& src_arg_list = - src_type == OpArgType::IN ? proto.inputs() : proto.outputs(); + src_type == OpArgType::IN ? proto->inputs() : proto->outputs(); for (const auto& arg : src_arg_list) { if (arg.no_gradient() && !is_grad) continue; const std::string src_name = arg.name(); @@ -43,6 +44,8 @@ OperatorBase* BuildGradOp(const OperatorBase* op) { auto it = OpRegistry::op_info_map().find(op->type_); PADDLE_ENFORCE(it != OpRegistry::op_info_map().end(), "'%s' has not been registered.", op->type_); + PADDLE_ENFORCE(it->second.proto_ != nullptr, "'%s' has no OpProto.", + op->type_); std::string grad_op_type = it->second.grad_op_type_; PADDLE_ENFORCE(!grad_op_type.empty(), "'%s' has no gradient operator.", op->type_); diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 6dd5f4af22..120f4ede6b 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -126,13 +126,6 @@ class NOPMaker : public OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) {} }; -struct OpInfo { - std::function creator_; - std::string grad_op_type_; - OpProto* proto_; - OpAttrChecker* checker_; -}; - class OpRegistry { using VarNameMap = OperatorBase::VarNameMap; using OpCreator = std::function; public: + struct OpInfo { + OpCreator creator_; + std::string grad_op_type_; + OpProto* proto_; + OpAttrChecker* checker_; + }; + template static void RegisterOp(const std::string& op_type, const std::string& grad_op_type) { @@ -175,9 +175,9 @@ class OpRegistry { } static std::shared_ptr CreateOp(const std::string& type, - const VarNameList& inputs, - const VarNameList& outputs, - const AttributeMap& attrs) { + const VarNameMap& inputs, + const VarNameMap& outputs, + AttributeMap attrs) { auto it = op_info_map().find(type); PADDLE_ENFORCE(it != op_info_map().end(), "Operator '%s' has not been registered.", type); diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index c054804477..0daf12e7f5 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -152,7 +152,7 @@ std::vector OperatorBase::OutputVars(bool has_intermediate) const { type_); // get all OpProto::Var for outputs - for (auto& o : it->second.proto_.outputs()) { + for (auto& o : it->second.proto_->outputs()) { // ignore all intermediate output if (o.intermediate()) continue; auto out = outputs_.find(o.name()); From 39c986c73029b39576772afd4806f37fdfe9d57a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 14 Aug 2017 13:47:15 -0700 Subject: [PATCH 37/92] restart CI --- paddle/framework/operator.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 606ab9e502..144db220a2 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -216,7 +216,7 @@ class InferShapeContext { [&](const std::string& sub_name) { auto var = scope_.FindVar(sub_name); PADDLE_ENFORCE_NOT_NULL( - var, "MultiOutput(%s:%s) should not be nullptr", name, + var, "MultiOutput(%s:%s) should not be nullptr.", name, sub_name); return var->GetMutable(); }); From 914a2f2b42c28b5a037b470c613b305303d99e16 Mon Sep 17 00:00:00 2001 From: 
fengjiayi Date: Mon, 14 Aug 2017 21:30:31 +0000 Subject: [PATCH 38/92] Format code --- paddle/framework/grad_op_builder_test.cc | 14 ++++++-------- paddle/framework/pybind.cc | 4 ++-- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 043503ada6..d0d5d64fe6 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -59,10 +59,9 @@ REGISTER_OP(io_ignored, f::NOP, f::IOIgnoredOpMaker, io_ignored_grad, f::NOP); TEST(GradOpBuilder, MutiInOut) { std::shared_ptr test_op(f::OpRegistry::CreateOp( - "mult_io", - {{"In1", {"in1"}}, - {"In2_mult", {"in2_1", "in2_2", "in2_3"}}, - {"In3", {"in3"}}}, + "mult_io", {{"In1", {"in1"}}, + {"In2_mult", {"in2_1", "in2_2", "in2_3"}}, + {"In3", {"in3"}}}, {{"Out1", {"out1"}}, {"Out2_mult", {"out2_1", "out2_2"}}}, {})); std::shared_ptr grad_test_op = f::OpRegistry::CreateGradOp(*test_op); @@ -92,10 +91,9 @@ TEST(GradOpBuilder, MutiInOut) { TEST(GradOpBuilder, IOIgnoredInGradient) { std::shared_ptr test_op(f::OpRegistry::CreateOp( - "io_ignored", - {{"In1", {"in1"}}, - {"In2_mult", {"in2_1", "in2_2"}}, - {"In3_mult", {"in3_1", "in3_2"}}}, + "io_ignored", {{"In1", {"in1"}}, + {"In2_mult", {"in2_1", "in2_2"}}, + {"In3_mult", {"in3_1", "in3_2"}}}, {{"Out1_mult", {"out1_1", "out1_2"}}, {"Out2", {"out2"}}}, {})); std::shared_ptr grad_test_op = f::OpRegistry::CreateGradOp(*test_op); diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index 108ae79d2c..047e09642c 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -58,8 +58,8 @@ void ExposeOperator(ClassType &m) { .def("outputs", [](const typename ClassType::type &op) -> std::map> { - return op.outputs_; - }) + return op.outputs_; + }) .def("inputs", [](const typename ClassType::type &op) { return op.inputs_; }) .def("__str__", &ClassType::type::DebugString) From 2da240c7ec776b44ffe6e06fa551fbff960c3b18 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Mon, 14 Aug 2017 15:13:23 -0700 Subject: [PATCH 39/92] fix local recordio reader --- python/paddle/v2/reader/creator.py | 12 ++++++---- python/paddle/v2/reader/tests/creator_test.py | 22 ++++++++++++++++++ .../v2/reader/tests/test_reader_recordio.dat | Bin 0 -> 76 bytes 3 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 python/paddle/v2/reader/tests/test_reader_recordio.dat diff --git a/python/paddle/v2/reader/creator.py b/python/paddle/v2/reader/creator.py index d0f18e4b66..97e844b92c 100644 --- a/python/paddle/v2/reader/creator.py +++ b/python/paddle/v2/reader/creator.py @@ -57,7 +57,7 @@ def text_file(path): return reader -def recordio_local(paths, buf_size=100): +def recordio(paths, buf_size=100): """ Creates a data reader from given RecordIO file paths separated by ",", glob pattern is supported. 
@@ -67,15 +67,19 @@ def recordio_local(paths, buf_size=100): import recordio as rec import paddle.v2.reader.decorator as dec + import cPickle as pickle def reader(): - a = ','.join(paths) - f = rec.reader(a) + if isinstance(paths, basestring): + path = paths + else: + path = ",".join(paths) + f = rec.reader(path) while True: r = f.read() if r is None: break - yield r + yield pickle.loads(r) f.close() return dec.buffered(reader, buf_size) diff --git a/python/paddle/v2/reader/tests/creator_test.py b/python/paddle/v2/reader/tests/creator_test.py index 359f3eeefb..cf190aa664 100644 --- a/python/paddle/v2/reader/tests/creator_test.py +++ b/python/paddle/v2/reader/tests/creator_test.py @@ -34,5 +34,27 @@ class TestTextFile(unittest.TestCase): self.assertEqual(e, str(idx * 2) + " " + str(idx * 2 + 1)) +class TestRecordIO(unittest.TestCase): + def do_test(self, path): + reader = paddle.v2.reader.creator.recordio(path) + idx = 0 + for e in reader(): + if idx == 0: + self.assertEqual(e, (1, 2, 3)) + elif idx == 1: + self.assertEqual(e, (4, 5, 6)) + idx += 1 + self.assertEqual(idx, 2) + + def test_recordIO(self): + self.do_test( + os.path.join( + os.path.dirname(__file__), "test_reader_recordio.dat")) + self.do_test([ + os.path.join( + os.path.dirname(__file__), "test_reader_recordio.dat") + ]) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/v2/reader/tests/test_reader_recordio.dat b/python/paddle/v2/reader/tests/test_reader_recordio.dat new file mode 100644 index 0000000000000000000000000000000000000000..a99a35bb829e066c4845d0b85b96cd1eb3a12491 GIT binary patch literal 76 zcmZQ!W@4P2Bs!asfq}sSh?#)+KN|x>v0q|9K_sIV14Bftj}1RiRKwGd%hQO<)0nHI Tz>rH1B4onlY0Bkk1`z@P(}N7c literal 0 HcmV?d00001 From 864b00cdf0a70bca09ad52c514a7a9875e22d3a5 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 14 Aug 2017 15:26:56 -0700 Subject: [PATCH 40/92] Replace `EmptyOp` with `NOP` --- paddle/framework/backward_test.cc | 25 +++++++++---------------- paddle/operators/net_op_test.cc | 13 +++---------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index 1a2bee50a1..eb36ca4cfb 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -28,13 +28,6 @@ using OpAttrChecker = framework::OpAttrChecker; using Scope = framework::Scope; using DeviceContext = platform::DeviceContext; -class EmptyOp : public OperatorBase { - public: - using OperatorBase::OperatorBase; - void InferShape(const Scope &scope) const override {} - void Run(const Scope &scope, const DeviceContext &dev_ctx) const override {} -}; - class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -155,16 +148,16 @@ class AddOpMaker : public OpProtoAndCheckerMaker { namespace f = paddle::framework; namespace ops = paddle::operators; using EnforceNotMet = paddle::platform::EnforceNotMet; -REGISTER_OP(rowwise_add, f::EmptyOp, f::RowWiseAddOpMaker, rowwise_add_grad, - f::EmptyOp); -REGISTER_OP(mul, f::EmptyOp, f::MulOpMaker, mul_grad, f::EmptyOp); -REGISTER_OP(sigmoid, f::EmptyOp, f::SigmoidOpMaker, sigmoid_grad, f::EmptyOp); -REGISTER_OP_WITHOUT_GRADIENT(nograd, f::EmptyOp, f::NoGradOpMaker); -REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::EmptyOp, f::FillZeroOpMaker); -REGISTER_OP(add, f::EmptyOp, f::AddOpMaker, add_grad, f::EmptyOp); +REGISTER_OP(rowwise_add, f::NOP, f::RowWiseAddOpMaker, rowwise_add_grad, + f::NOP); +REGISTER_OP(mul, f::NOP, 
f::MulOpMaker, mul_grad, f::NOP); +REGISTER_OP(sigmoid, f::NOP, f::SigmoidOpMaker, sigmoid_grad, f::NOP); +REGISTER_OP_WITHOUT_GRADIENT(nograd, f::NOP, f::NoGradOpMaker); +REGISTER_OP_WITHOUT_GRADIENT(fill_zeros_like, f::NOP, f::FillZeroOpMaker); +REGISTER_OP(add, f::NOP, f::AddOpMaker, add_grad, f::NOP); REGISTER_OP_WITHOUT_GRADIENT(fc, f::FcOp, f::FcOpMaker); -REGISTER_OP(many_output_op, f::EmptyOp, f::ManyOutputOpMaker, - many_output_op_grad, f::EmptyOp); +REGISTER_OP(many_output_op, f::NOP, f::ManyOutputOpMaker, many_output_op_grad, + f::NOP); TEST(Backward, simple_op_grad) { auto fwd = f::OpRegistry::CreateOp( diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index f7aa56262e..ea6327f1b0 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -20,13 +20,6 @@ class TestOp : public framework::OperatorBase { } }; -class EmptyOp : public framework::OperatorBase { - public: - using framework::OperatorBase::OperatorBase; - void InferShape(const Scope& scope) const override {} - void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {} -}; - template void AssertSameVectorWithoutOrder(const std::vector& expected, const std::vector& actual) { @@ -67,9 +60,9 @@ TEST(OpKernel, all) { TEST(NetOp, insert_op) { NetOp net; - auto op1 = std::shared_ptr( - new EmptyOp("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, {})); + auto op1 = std::shared_ptr( + new NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, + {{"Out", {"y"}}}, {})); net.AddOp(op1); net.InsertOp(0, op1); ASSERT_EQ(2UL, net.ops_.size()); From c3bda2acf854c75a13fa96fe2cd7511d17a28f0f Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Mon, 14 Aug 2017 15:16:41 -0700 Subject: [PATCH 41/92] Add recordio as paddle's dependency. 
--- Dockerfile | 1 + python/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 41b6729124..ea2a00d6cd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -64,6 +64,7 @@ RUN pip install --upgrade pip && \ pip install -U sphinx-rtd-theme==0.1.9 recommonmark && \ pip install pre-commit 'requests==2.9.2' 'ipython==5.3.0' && \ pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ + pip install 'recordio>=0.1.0' && \ pip install opencv-python rarfile 'scipy>=0.19.0' 'nltk>=3.2.2' # To fix https://github.com/PaddlePaddle/Paddle/issues/1954, we use diff --git a/python/requirements.txt b/python/requirements.txt index 3df822bd76..e19453c25d 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,7 +1,7 @@ requests==2.9.2 numpy>=1.12 protobuf==3.1 -recordio +recordio>=0.1.0 matplotlib rarfile scipy>=0.19.0 From 5d142b0c78f4c4157ada0a87ad194e891057ea0c Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 14 Aug 2017 15:32:05 -0700 Subject: [PATCH 42/92] Fix compile errors --- paddle/operators/net_op_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index ea6327f1b0..019c256943 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -60,9 +60,9 @@ TEST(OpKernel, all) { TEST(NetOp, insert_op) { NetOp net; - auto op1 = std::shared_ptr( - new NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, {})); + auto op1 = std::shared_ptr( + new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, + {{"Out", {"y"}}}, {})); net.AddOp(op1); net.InsertOp(0, op1); ASSERT_EQ(2UL, net.ops_.size()); From 219f7a46404652a8edeb58e02fd52119d6af9d81 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 15 Aug 2017 07:37:09 +0800 Subject: [PATCH 43/92] typo --- paddle/framework/backward.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index ab91e422ee..83b7e4cdac 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -70,7 +70,7 @@ std::shared_ptr BackwardRecursive( std::unordered_set& no_grad_names, size_t& uniq_id) { // If all input gradients of forwarding operator do not need to calculate, // just return an NOP. Not return null ptr because NOP does not take - // too much time for calculation, but it is useful for simplifying logic. + // too much time for calculation, but it is useful for simplifying logic. 
if (AllInSet(forwardOp.Inputs() /*names*/, kGradVarSuffix /*suffix*/, no_grad_names /*set*/)) { return NOP(); From 318fee83895ba6c13e44d08954de5801e411e632 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 15 Aug 2017 09:57:09 +0800 Subject: [PATCH 44/92] refine cblas --- cmake/external/openblas.cmake | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index d47eabba44..0eeccbf7d8 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -73,17 +73,18 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) # linear algebra libraries for cc_library(xxx SRCS xxx.c DEPS cblas) SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c) FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";") - IF(${CBLAS_PROVIDER} MATCHES MKL) ADD_LIBRARY(cblas SHARED ${dummyfile}) - TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) - ADD_DEPENDENCIES(cblas mklml) ELSE() ADD_LIBRARY(cblas STATIC ${dummyfile}) - TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) ENDIF() +TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) IF(NOT ${CBLAS_FOUND}) ADD_DEPENDENCIES(cblas extern_openblas) LIST(APPEND external_project_dependencies cblas) +ELSE() + IF("${CBLAS_PROVIDER}" STREQUAL "MKLML") + ADD_DEPENDENCIES(cblas mklml) + ENDIF() ENDIF(NOT ${CBLAS_FOUND}) From 0079fa32569f414c8ed2cceb1a70d98deb72d5e0 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Tue, 15 Aug 2017 11:01:35 +0800 Subject: [PATCH 45/92] Rnn make stepnet member (#3469) * make stepnet member * add pybind support * fix Inputs Outputs * remove unique_ptr --- paddle/framework/pybind.cc | 29 ++ paddle/operators/CMakeLists.txt | 1 - paddle/operators/recurrent_op.cc | 38 +-- paddle/operators/recurrent_op.h | 29 +- paddle/operators/recurrent_op_test.cc | 252 ------------------ paddle/operators/rnn/recurrent_op_utils.cc | 1 - python/paddle/v2/framework/op.py | 24 +- .../v2/framework/tests/test_recurrent_op.py | 19 +- 8 files changed, 97 insertions(+), 296 deletions(-) delete mode 100644 paddle/operators/recurrent_op_test.cc diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index 21c60a3c86..fe0c87bc57 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -20,6 +20,7 @@ limitations under the License. */ #include "paddle/framework/op_registry.h" #include "paddle/framework/tensor_py.h" #include "paddle/operators/net_op.h" +#include "paddle/operators/recurrent_op.h" #include "paddle/platform/enforce.h" #include "paddle/platform/place.h" #include "paddle/string/to_string.h" @@ -241,6 +242,11 @@ All parameter, weight, gradient are variables in Paddle. const std::shared_ptr &net) -> void { self.AddOp(std::static_pointer_cast(net)); }) + .def("add_op", + [](operators::NetOp &self, + const std::shared_ptr &rnn) -> void { + self.AddOp(std::static_pointer_cast(rnn)); + }) .def("complete_add_op", &operators::NetOp::CompleteAddOp) .def("complete_add_op", [](std::shared_ptr &self) { self->CompleteAddOp(); @@ -248,6 +254,29 @@ All parameter, weight, gradient are variables in Paddle. 
ExposeOperator(net); + // recurrent_op + py::class_> + rnn(m, "RecurrentOp"); + + rnn.def_static( + "create", + [](py::bytes protobin) -> std::shared_ptr { + OpDesc desc; + PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), + "Cannot parse user input to OpDesc"); + PADDLE_ENFORCE(desc.IsInitialized(), + "User OpDesc is not initialized, reason %s", + desc.InitializationErrorString()); + auto rnn_op = OpRegistry::CreateOp(desc); + return std::dynamic_pointer_cast(rnn_op); + }) + .def("set_stepnet", + [](operators::RecurrentOp &self, + const std::shared_ptr &net) -> void { + self.set_stepnet(net); + }); + ExposeOperator(rnn); + m.def("unique_integer", UniqueIntegerGenerator); m.def("is_compile_gpu", IsCompileGPU); diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e5ff3b2f7e..a7c89787e4 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -66,6 +66,5 @@ op_library(sgd_op SRCS sgd_op.cc sgd_op.cu) op_library(recurrent_op SRCS recurrent_op.cc rnn/recurrent_op_utils.cc DEPS framework_proto tensor op_registry operator net_op) -cc_test(recurrent_op_test SRCS recurrent_op_test.cc DEPS recurrent_op gtest mul_op add_op) op_library(uniform_random_op SRCS uniform_random_op.cc uniform_random_op.cu) diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index f61e1288d3..78ce0ba3c0 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -36,15 +36,13 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const { rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, true /*infer_shape_mode*/); InitMemories(step_scopes[0], true /*infer_shape_mode*/); - Variable* net = scope.FindVar(arg_->step_net); - PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (size_t i = 0; i < seq_len_; i++) { if (i > 0) { rnn::LinkMemories(step_scopes, arg_->memories, i, -1, true /*infer_shape_mode*/); } - net->GetMutable()->InferShape(*step_scopes[i]); + (*stepnet_)->InferShape(*step_scopes[i]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -56,7 +54,6 @@ void RecurrentAlgorithm::Run(const Scope& scope, rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, false /*infer_shape_mode*/); InitMemories(step_scopes[0], false /*infer_shape_mode*/); - Variable* net = scope.FindVar(arg_->step_net); for (size_t step_id = 0; step_id < seq_len_; step_id++) { // create output alias variables @@ -64,7 +61,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1, false /*infer_shape_mode*/); } - net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); + (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, false /*infer_shape_mode*/); @@ -78,18 +75,16 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { auto step_scopes = step_scopes_var->GetMutable>(); // Now all variables in scope must be created outside of op. 
- auto net_var = scope.FindVar(arg_->step_net); - PADDLE_ENFORCE(net_var != nullptr, "no stepnet called %s in scope", - arg_->step_net); - auto net_op = net_var->GetMutable(); - PADDLE_ENFORCE(!net_op->Outputs().empty(), "net_op has no outputs"); + PADDLE_ENFORCE_NOT_NULL(stepnet_); + PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); + PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs"); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { auto& step_scope = scope.NewScope(); // create step net's temp inputs - for (auto& input : net_op->Inputs()) { + for (auto& input : (*stepnet_)->Inputs()) { // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { @@ -98,7 +93,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { } } // create stepnet's outputs - for (const auto& output : net_op->Outputs()) { + for (const auto& output : (*stepnet_)->Outputs()) { for (auto& var_name : output.second) { step_scope.NewVar(var_name); } @@ -140,9 +135,8 @@ RecurrentOp::RecurrentOp(const std::string& type, const framework::OperatorBase::VarNameMap& outputs, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { - std::unique_ptr arg(new rnn::Argument()); - rnn::InitArgument(kArgName, arg.get(), *this); - alg_.Init(std::move(arg)); + rnn::InitArgument(kArgName, &arg_, *this); + alg_.Init(&arg_, &stepnet_); } class RecurrentAlgorithmProtoAndCheckerMaker @@ -158,7 +152,6 @@ class RecurrentAlgorithmProtoAndCheckerMaker .AsDuplicable(); AddInput(name.boot_memories, "variables to initialize memories.") .AsDuplicable(); - AddInput(name.step_net, "network shared by all steps."); AddOutput(name.outlinks, "the outputs that need to concated for all steps.") .AsDuplicable(); @@ -180,14 +173,12 @@ void RecurrentGradientAlgorithm::Run( auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, false /*infer_shape_mode*/); - Variable* net = scope.FindVar(arg_->step_net); - PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, false /*infer_shape_mode*/); } - net->GetMutable()->Run(*step_scopes[step_id], dev_ctx); + (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } LinkBootMemoryGradients(step_scopes[0], false); rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, @@ -219,14 +210,12 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { auto step_scopes = GetStepScopes(scope); rnn::SegmentInputs(step_scopes, arg_->inlinks, seq_len_, true /*infer_shape_mode*/); - Variable* net = scope.FindVar(arg_->step_net); - PADDLE_ENFORCE(net != nullptr, "failed to get step net"); for (int step_id = seq_len_ - 1; step_id >= 0; --step_id) { if (static_cast(step_id) != seq_len_ - 1) { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, true /*infer_shape_mode*/); } - net->GetMutable()->InferShape(*step_scopes[step_id]); + (*stepnet_)->InferShape(*step_scopes[step_id]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -238,9 +227,8 @@ RecurrentGradientOp::RecurrentGradientOp( const framework::OperatorBase::VarNameMap& outputs, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { - std::unique_ptr arg(new rnn::Argument()); - 
rnn::InitArgument(kArgName, arg.get(), *this); - alg_.Init(std::move(arg)); + rnn::InitArgument(kArgName, &arg_, *this); + alg_.Init(&arg_, &stepnet_); } } // namespace operators diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 8f4f2444d8..caca644c96 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -15,6 +15,7 @@ #pragma once #include "paddle/framework/operator.h" +#include "paddle/operators/net_op.h" #include "paddle/operators/rnn/recurrent_op_utils.h" namespace paddle { @@ -33,7 +34,11 @@ class RecurrentAlgorithm { void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const; - void Init(std::unique_ptr arg) { arg_ = std::move(arg); } + void Init(rnn::Argument* arg, std::shared_ptr* stepnet) { + PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); + arg_ = arg; + stepnet_ = stepnet; + } /** * InferShape must be called before Run. @@ -58,7 +63,8 @@ class RecurrentAlgorithm { void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const; private: - std::unique_ptr arg_; + std::shared_ptr* stepnet_; + rnn::Argument* arg_; mutable size_t seq_len_; }; @@ -74,7 +80,11 @@ class RecurrentGradientAlgorithm { * operator. */ public: - void Init(std::unique_ptr arg) { arg_ = std::move(arg); } + void Init(rnn::Argument* arg, std::shared_ptr* stepnet) { + PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); + arg_ = std::move(arg); + stepnet_ = stepnet; + } void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const; @@ -95,8 +105,9 @@ class RecurrentGradientAlgorithm { } private: - std::unique_ptr arg_; + rnn::Argument* arg_; mutable size_t seq_len_; + std::shared_ptr* stepnet_; }; class RecurrentOp final : public framework::OperatorBase { @@ -115,10 +126,15 @@ class RecurrentOp final : public framework::OperatorBase { alg_.Run(scope, dev_ctx); } + void set_stepnet(std::shared_ptr net) { stepnet_ = net; } + const NetOp* stepnet() const { return stepnet_.get(); } + static const rnn::ArgumentName kArgName; private: RecurrentAlgorithm alg_; + rnn::Argument arg_; + std::shared_ptr stepnet_; }; class RecurrentGradientOp final : public framework::OperatorBase { @@ -141,8 +157,13 @@ class RecurrentGradientOp final : public framework::OperatorBase { static const rnn::ArgumentName kArgName; + void set_stepnet(const std::shared_ptr& net) { stepnet_ = net; } + const NetOp* stepnet() const { return stepnet_.get(); } + private: RecurrentGradientAlgorithm alg_; + std::shared_ptr stepnet_; + rnn::Argument arg_; }; } // namespace operators diff --git a/paddle/operators/recurrent_op_test.cc b/paddle/operators/recurrent_op_test.cc deleted file mode 100644 index 2f6eff0720..0000000000 --- a/paddle/operators/recurrent_op_test.cc +++ /dev/null @@ -1,252 +0,0 @@ -/* - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -#include "paddle/operators/recurrent_op.h" - -#include -#include - -#include "paddle/framework/ddim.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/tensor.h" -#include "paddle/operators/net_op.h" - -namespace paddle { -namespace operators { - -using namespace paddle::framework; - -class RecurrentGradientAlgorithmTest : public ::testing::Test { - protected: - virtual void SetUp() override { - CreateGlobalVariables(); - CreateStepScopes(); - CreateStepNet(); - CreateRNNGradientAlgorithm(); - - // segment inputs - SegmentInputs(); - // link forward memories - LinkeMemories(); - } - - virtual void TearDown() override {} - - void CreateGlobalVariables() { - // inputs: x - LOG(INFO) << "create global variable x"; - Variable* x = scope_.NewVar("x"); - DDim dims = - make_ddim({10 /*sent size*/, 20 /*batch size*/, 30 /*input dim*/}); - x->GetMutable()->mutable_data(dims, platform::CPUPlace()); - // inputs: h_boot - LOG(INFO) << "create global variable h_boot"; - Variable* h_boot = scope_.NewVar("h_boot"); - h_boot->GetMutable()->mutable_data( - make_ddim({20 /*batch size*/, 30 /*input dim*/}), platform::CPUPlace()); - // inputs: w - LOG(INFO) << "create global variable w"; - Variable* w = scope_.NewVar("rnn/w"); - w->GetMutable()->mutable_data(make_ddim({30, 30}), - platform::CPUPlace()); - // inputs: h_grad - LOG(INFO) << "create variable h_grad"; - Variable* dh = scope_.NewVar("h_grad"); - dh->GetMutable()->mutable_data(make_ddim({10, 20, 30}), - platform::CPUPlace()); - // inputs: step_scopes - LOG(INFO) << "create variable step_scopes"; - scope_.NewVar("step_scopes"); - // inputs: step_net - LOG(INFO) << "create variable step_net"; - scope_.NewVar("step_net"); - // outputs: w_grad - LOG(INFO) << "create global variable w_grad"; - scope_.NewVar("rnn/w_grad"); - // outputs: x_grad - LOG(INFO) << "create global variable x_grad"; - scope_.NewVar("x_grad"); - // outputs: h_boot_grad - LOG(INFO) << "create global variable h_boot_grad"; - scope_.NewVar("h_boot_grad"); - } - - void CreateStepScopes() { - auto step_scopes = - scope_.FindVar("step_scopes")->GetMutable>(); - for (int i = 0; i < 10; ++i) { - auto& scope = scope_.NewScope(); - auto pre_t = scope.NewVar("rnn/pre_h")->GetMutable(); - pre_t->mutable_data({20, 30}, platform::CPUPlace()); - auto tensor = scope.NewVar("rnn/h")->GetMutable(); - tensor->mutable_data({20, 30}, platform::CPUPlace()); - - // for unit test of ConcatOutputs - auto xg = scope.NewVar("rnn/x_grad")->GetMutable(); - xg->mutable_data({20, 30}, platform::CPUPlace()); - - step_scopes->emplace_back(&scope); - } - - // last time step - auto g = (*step_scopes)[9]->NewVar("rnn/h_pre_grad")->GetMutable(); - g->mutable_data({20, 30}, platform::CPUPlace()); - } - - void CreateRNNGradientAlgorithm() { - std::unique_ptr arg(new rnn::Argument()); - arg->step_net = "step_net"; - arg->step_scopes = "step_scopes"; - rnn::Link inlink; - inlink.external = "h_grad"; - inlink.internal = "rnn/h_grad"; - arg->inlinks = std::vector{inlink}; - - rnn::Link outlink; - outlink.external = "x_grad"; - outlink.internal = "rnn/x_grad"; - arg->outlinks = std::vector{outlink}; - - rnn::MemoryAttr mem_attr; - mem_attr.pre_var = "rnn/h_pre_grad"; - mem_attr.var = "rnn/h_grad"; - mem_attr.boot_var = "h_boot_grad"; - arg->memories = std::vector{mem_attr}; - - rnn_grad_algo_.Init(std::move(arg)); - } - - void CreateStepNet() { - LOG(INFO) << "create variable step_net"; - Variable* var = scope_.NewVar("step_net"); - auto net = 
var->GetMutable(); - // TODO(qingqing) modify backward op create for RNNOp unit test - // and the unit test will be removed to Python. - // net->AddOp(OpRegistry::CreateOp("mul", {"X", {"rnn/h_pre", "rnn/w", - // "rnn/s_grad"}}, {"Y", {"rnn/h_pre_grad", "rnn/w_grad"}}, {})); - - // net->AddOp(OpRegistry::CreateOp("add_two", {"X", {"rnn/h_grad"}}, - // {"Y", {"rnn/x_grad"}}, {"Out", "rnn/s_grad"}}, {})); - net->CompleteAddOp(); - } - - void SegmentInputs() { - LOG(INFO) << "segment inputs"; - std::vector inlinks = {"x"}; - std::vector inlinks_alias = {"rnn/x"}; - - rnn::Link inlink; - inlink.external = "x"; - inlink.internal = "rnn/x"; - auto step_scopes = - scope_.FindVar("step_scopes")->GetMutable>(); - rnn::SegmentInputs(*step_scopes, std::vector{inlink}, 10, - true /*infer_shape_mode*/); - } - - void LinkeMemories() { - LOG(INFO) << "link memories"; - rnn::MemoryAttr mem_attr; - mem_attr.pre_var = "rnn/h_pre"; - mem_attr.var = "rnn/h"; - mem_attr.boot_var = "boot_h"; - std::vector memories; - memories.push_back(mem_attr); - auto step_scopes = - scope_.FindVar("step_scopes")->GetMutable>(); - for (int i = 1; i < 10; ++i) { - rnn::LinkMemories(*step_scopes, memories, i, -1, - true /*infer_shape_mode*/); - } - } - - Scope scope_; - RecurrentGradientAlgorithm rnn_grad_algo_; -}; - -// TEST_F(RecurrentGradientAlgorithmTest, Run) { -// platform::CPUDeviceContext ctx; -// rnn_grad_algo_.Run(scope_, ctx); -// } - -} // namespace operators -} // namespace paddle - -TEST(RecurrentOp, LinkMemories) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators; - - // create and init step scopes - size_t len = 10; - std::vector step_scopes; - for (size_t i = 0; i < len; ++i) { - auto scope = new Scope(); - scope->NewVar("pre_h"); - auto tensor = scope->NewVar("h")->GetMutable(); - float* data = tensor->mutable_data({15, 20}, CPUPlace()); - for (size_t j = 0; j < 15 * 20; ++j) { - data[j] = rand() * (1. 
/ (double)RAND_MAX); - } - step_scopes.push_back(scope); - } - - // create MemoryAttr - rnn::MemoryAttr mem_attr; - mem_attr.pre_var = "pre_h"; - mem_attr.var = "h"; - mem_attr.boot_var = "boot_h"; - std::vector memories; - memories.push_back(mem_attr); - - for (size_t i = 1; i < len; ++i) { - rnn::LinkMemories(step_scopes, memories, i, -1, false - /*infer_shape_mode*/); - } - // check - for (size_t i = 0; i < len - 1; ++i) { - const float* a = - step_scopes[i]->FindVar("h")->GetMutable()->data(); - const float* b = step_scopes[i + 1] - ->FindVar("pre_h") - ->GetMutable() - ->data(); - for (size_t j = 0; j < 15 * 20; ++j) { - ASSERT_FLOAT_EQ(a[j], b[j]); - } - } - - for (int i = len - 2; i >= 0; --i) { - rnn::LinkMemories(step_scopes, memories, i, 1, false - /*infer_shape_mode*/); - } - // check - for (int i = len - 2; i >= 0; --i) { - const float* a = - step_scopes[i]->FindVar("pre_h")->GetMutable()->data(); - const float* b = - step_scopes[i + 1]->FindVar("h")->GetMutable()->data(); - for (size_t j = 0; j < 15 * 20; ++j) { - ASSERT_FLOAT_EQ(a[j], b[j]); - } - } - - for (auto s : step_scopes) { - delete s; - } -} - -USE_OP(add_two); -USE_OP(mul); -USE_OP_ITSELF(recurrent_op); diff --git a/paddle/operators/rnn/recurrent_op_utils.cc b/paddle/operators/rnn/recurrent_op_utils.cc index 7e4770630e..a9b65c30f2 100644 --- a/paddle/operators/rnn/recurrent_op_utils.cc +++ b/paddle/operators/rnn/recurrent_op_utils.cc @@ -106,7 +106,6 @@ void LinkMemories(const std::vector& scopes, void InitArgument(const ArgumentName& name, Argument* arg, const framework::OperatorBase& op) { - arg->step_net = op.Input(name.step_net); arg->step_scopes = op.Output(name.step_scopes); auto inlinks = op.Inputs(name.inlinks); diff --git a/python/paddle/v2/framework/op.py b/python/paddle/v2/framework/op.py index 904de08da4..6ac656321e 100644 --- a/python/paddle/v2/framework/op.py +++ b/python/paddle/v2/framework/op.py @@ -23,7 +23,7 @@ class OpDescCreationMethod(object): """ A Functor object to convert user input(use key word args) to OpDesc based on OpProto. - + :param op_proto: The OpProto object. 
:type op_proto: op_proto_pb2.OpProto """ @@ -177,4 +177,26 @@ class OperatorFactory(object): return self.get_op_info(type).attrs +class __RecurrentOp__(object): + __proto__ = None + type = 'recurrent_op' + + def __init__(self): + # cache recurrent_op's proto + if self.__proto__ is None: + for op_proto in get_all_op_protos(): + if op_proto.type == self.type: + self.__proto__ = op_proto + + def __call__(self, *args, **kwargs): + if self.type not in args and 'type' not in kwargs: + kwargs['type'] = self.type + # create proto + create_method = OpDescCreationMethod(self.__proto__) + proto = create_method(*args, **kwargs) + # create rnnop + return core.RecurrentOp.create(proto.SerializeToString()) + + Operator = OperatorFactory() # Default global factory +RecurrentOp = __RecurrentOp__() diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py index 0db66cc4e1..3d4a34d8d7 100644 --- a/python/paddle/v2/framework/tests/test_recurrent_op.py +++ b/python/paddle/v2/framework/tests/test_recurrent_op.py @@ -2,7 +2,7 @@ import logging import paddle.v2.framework.core as core import unittest import numpy as np -from paddle.v2.framework.op import Operator +from paddle.v2.framework.op import Operator, RecurrentOp def py_sigmoid(x): @@ -98,11 +98,11 @@ class TestRecurrentOp(unittest.TestCase): def forward(self): self.scope = core.Scope() self.create_global_variables() + self.create_rnn_op() self.create_step_net() - rnn_op = self.create_rnn_op() ctx = core.DeviceContext.create(core.CPUPlace()) - rnn_op.infer_shape(self.scope) - rnn_op.run(self.scope, ctx) + self.rnnop.infer_shape(self.scope) + self.rnnop.run(self.scope, ctx) return np.array(self.scope.find_var("h").get_tensor()) def create_global_variables(self): @@ -128,8 +128,7 @@ class TestRecurrentOp(unittest.TestCase): def create_rnn_op(self): # create RNNOp - rnnop = Operator( - "recurrent_op", + self.rnnop = RecurrentOp( # inputs inlinks=["x"], boot_memories=["h_boot"], @@ -142,14 +141,9 @@ class TestRecurrentOp(unittest.TestCase): outlink_alias=["h@alias"], pre_memories=["h@pre"], memories=["h@alias"]) - return rnnop def create_step_net(self): - var = self.scope.new_var("stepnet") - stepnet = var.get_net() - - # x_fc_op = Operator("fc", X="x@alias", W="W", Y="Wx") - # h_fc_op = Operator("fc", X="h@pre", W="U", Y="Uh") + stepnet = core.Net.create() x_fc_op = Operator("mul", X="x@alias", Y="W", Out="Wx") h_fc_op = Operator("mul", X="h@pre", Y="U", Out="Uh") sum_op = Operator("add_two", X="Wx", Y="Uh", Out="sum") @@ -158,6 +152,7 @@ class TestRecurrentOp(unittest.TestCase): for op in [x_fc_op, h_fc_op, sum_op, sig_op]: stepnet.add_op(op) stepnet.complete_add_op(True) + self.rnnop.set_stepnet(stepnet) def test_forward(self): print 'test recurrent op forward' From d1cda3331646806afd07c18d4ef9f6a6b88ce72b Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 15 Aug 2017 11:03:17 +0800 Subject: [PATCH 46/92] build documentation don't need install Paddle before --- paddle/scripts/docker/build.sh | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 2f0205b770..a382d4368c 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -82,10 +82,6 @@ EOF fi -# To build documentation, we need to run cmake again after installing -# PaddlePaddle. This awkwardness is due to -# https://github.com/PaddlePaddle/Paddle/issues/1854. It also -# describes a solution. 
if [[ ${WITH_DOC:-OFF} == "ON" ]]; then cat < Date: Tue, 15 Aug 2017 12:16:06 +0800 Subject: [PATCH 47/92] add install infomation in build.sh --- paddle/scripts/docker/build.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index a382d4368c..6c2f5fed40 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -174,3 +174,7 @@ ADD go/cmd/master/master /usr/bin/ # default command shows the paddle version and exit CMD ["paddle", "version"] EOF + +set +xe +printf "If you need to install PaddlePaddle in develop docker image," +printf "please make install or pip install build/python/dist/*.whl.\n" From 95fe318e3ee19004419eb5aff09bca7ddaacad46 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 15 Aug 2017 14:08:20 +0800 Subject: [PATCH 48/92] init --- Dockerfile | 14 ------ cmake/flags.cmake | 7 --- paddle/platform/CMakeLists.txt | 2 +- paddle/platform/device_context.cc | 79 +++++++++++++++++++++++++------ paddle/platform/device_context.h | 12 +++-- 5 files changed, 74 insertions(+), 40 deletions(-) diff --git a/Dockerfile b/Dockerfile index da00471025..98f61ba586 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,20 +71,6 @@ RUN pip install -r /root/requirements.txt RUN apt-get install -y libssl-dev libffi-dev RUN pip install certifi urllib3[secure] -# TODO(qijun) The template library Eigen doesn't work well with GCC 5 -# coming with the default Docker image, so we switch to use GCC 4.8 -# by default. And I will check Eigen library later. - -RUN ln -sf gcc-4.8 /usr/bin/gcc && \ - ln -sf gcc-ar-4.8 /usr/bin/gcc-ar && \ - ln -sf gcc-nm-4.8 /usr/bin/gcc-nm && \ - ln -sf gcc-ranlib-4.8 /usr/bin/gcc-ranlib && \ - ln -sf gcc-4.8 /usr/bin/x86_64-linux-gnu-gcc && \ - ln -sf gcc-ar-4.8 /usr/bin/x86_64-linux-gnu-gcc-ar && \ - ln -sf gcc-nm-4.8 /usr/bin/x86_64-linux-gnu-gcc-nm && \ - ln -sf gcc-ranlib-4.8 /usr/bin/x86_64-linux-gnu-gcc-ranlib && \ - ln -sf g++-4.8 /usr/bin/g++ && \ - ln -sf g++-4.8 /usr/bin/x86_64-linux-gnu-g++ # Install woboq_codebrowser to /woboq RUN git clone https://github.com/woboq/woboq_codebrowser /woboq && \ diff --git a/cmake/flags.cmake b/cmake/flags.cmake index b27eb71550..47bb83b00a 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -9,13 +9,6 @@ function(CheckCompilerCXX11Flag) if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8) message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.") endif() - if(NOT ANDROID) - # TODO(qijun) gcc 4.9 or later versions raise SEGV due to the optimization problem. - # Use Debug mode instead for now. - if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9) - set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "" FORCE) - endif() - endif() elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") # cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang" # Apple Clang is a different compiler than upstream Clang which havs different version numbers. 
diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index 4154aad15c..c1ad60d160 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -16,5 +16,5 @@ ELSE() set(GPU_CTX_DEPS) ENDIF() -cc_library(device_context SRCS device_context.cc DEPS place eigen3 ${GPU_CTX_DEPS}) +cc_library(device_context SRCS device_context.cc DEPS memory place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index a928e09778..dc345bdd57 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/platform/device_context.h" +#include "paddle/memory/memory.h" namespace paddle { namespace platform { @@ -36,6 +37,59 @@ Place CPUDeviceContext::GetPlace() const { return CPUPlace(); } #ifndef PADDLE_ONLY_CPU +class EigenCudaStreamDevice : public Eigen::StreamInterface { + public: + EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) { + Eigen::initializeDeviceProp(); + } + ~EigenCudaStreamDevice() override {} + + void Reinitialize(const cudaStream_t* cuda_stream, GPUPlace place) { + stream_ = cuda_stream; + place_ = place; + device_prop_ = &Eigen::m_deviceProperties[place.device]; + } + + const cudaStream_t& stream() const override { return *stream_; } + + const cudaDeviceProp& deviceProperties() const override { + return *device_prop_; + } + + void* allocate(size_t num_bytes) const override { + paddle::memory::Alloc(place_, num_bytes); + } + + void deallocate(void* buffer) const override { + paddle::memory::Free(place_, buffer); + } + + void* scratchpad() const override { + if (scratch_ == NULL) { + scratch_ = allocate(Eigen::kCudaScratchSize + sizeof(unsigned int)); + } + return scratch_; + } + + unsigned int* semaphore() const override { + if (semaphore_ == NULL) { + char* scratch = + static_cast(scratchpad()) + Eigen::kCudaScratchSize; + semaphore_ = reinterpret_cast(scratch); + PADDLE_ENFORCE( + cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_)); + } + return semaphore_; + } + + private: + GPUPlace place_; + const cudaStream_t* stream_; // not owned; + const cudaDeviceProp* device_prop_; // not owned; + mutable char* scratch_; + mutable unsigned int* semaphore_; +}; + template <> Eigen::GpuDevice* DeviceContext::get_eigen_device() const { return reinterpret_cast(this)->eigen_device(); @@ -43,19 +97,9 @@ Eigen::GpuDevice* DeviceContext::get_eigen_device() const { CUDADeviceContext::CUDADeviceContext(GPUPlace place) : place_(place) { SetDeviceId(place_.device); - // TODO(qijun) Pass a created cuda stream to Eigen::CudaStreamDevice directly - // here will cause segment fault. We must implement a class derived from - // Eigen::StreamInterface, and reinitialize it with a cuda stream and a gpu id - // later. Please refer to the implementation of class EigenCudaStreamDevice - // in TensorFlow. - // - // We find that CUDA 7 introduces a new option, the per-thread default stream, - // that has two effects. Please refer to https://devblogs.nvidia.com/ - // parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/ - // - // So, we decide to use default stream and add –default-stream per-thread nvcc - // flag. Than, two threads with two CUDADeviceContexts will run parallelly. 
- eigen_stream_.reset(new Eigen::CudaStreamDevice()); + PADDLE_ENFORCE(cudaStreamCreate(&stream_)); + eigen_stream_.reset(new EigenCudaStreamDevice()); + eigen_stream_->Reinitialize(&stream_, place); eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get())); } @@ -75,12 +119,13 @@ CUDADeviceContext::~CUDADeviceContext() { } eigen_stream_.reset(); eigen_device_.reset(); + PADDLE_ENFORCE(cudaStreamDestroy(stream_)); } Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { - PADDLE_ENFORCE(cudaStreamSynchronize(0)); + PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); } Eigen::GpuDevice* CUDADeviceContext::eigen_device() const { @@ -91,6 +136,7 @@ cublasHandle_t CUDADeviceContext::cublas_handle() { if (!cublas_handle_) { SetDeviceId(place_.device); PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_)); + PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_)); } return cublas_handle_; } @@ -99,10 +145,13 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() { if (!cudnn_handle_) { SetDeviceId(place_.device); PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); + PADDLE_ENFORCE(dynload::cudnnSetStream(cudnnHandle_t, stream_)); } return cudnn_handle_; } +cudaStream_t CUDADeviceContext::stream() { return stream_; } + curandGenerator_t CUDADeviceContext::curand_generator() { if (!curand_generator_) { SetDeviceId(place_.device); @@ -110,6 +159,8 @@ curandGenerator_t CUDADeviceContext::curand_generator() { CURAND_RNG_PSEUDO_DEFAULT)); PADDLE_ENFORCE( dynload::curandSetPseudoRandomGeneratorSeed(curand_generator_, seed_)); + + PADDLE_ENFORCE(dynload::curandSetStream(curandGenerator_t, stream_)); } return curand_generator_; } diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index 08b5b2cff9..b68e177c0a 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -76,6 +76,9 @@ class CUDADeviceContext : public DeviceContext { /*! \brief Return curand handle in the device context. */ curandGenerator_t curand_generator(); + + /*! \brief Return cuda stream in the device context. 
*/ + cudaStream_t stream(); // clang-format on private: @@ -83,15 +86,16 @@ class CUDADeviceContext : public DeviceContext { private: std::unique_ptr eigen_device_; - std::unique_ptr eigen_stream_; + std::unique_ptr eigen_stream_; private: uint64_t seed_; // clang-format off - cudnnHandle_t cudnn_handle_ = nullptr; - cublasHandle_t cublas_handle_ = nullptr; - curandGenerator_t curand_generator_ = nullptr; + cudaStream_t stream_{nullptr} + cudnnHandle_t cudnn_handle_{nullptr}; + cublasHandle_t cublas_handle_{nullptr}; + curandGenerator_t curand_generator_{nullptr}; // clang-format on }; From 962cb25c9d4b29ae4e61bfa891faaead5e140633 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 15 Aug 2017 14:10:15 +0800 Subject: [PATCH 49/92] fix crash when disable WITH_SWIG_PY --- python/CMakeLists.txt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index d2f064bea0..7bd6d59b00 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -50,8 +50,11 @@ add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_PYTHON_BUILD_DIR}/lib* ${PADDLE_PYTHON_BUILD_DIR}/lib-python DEPENDS gen_proto_py copy_paddle_pybind framework_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) -add_custom_target(paddle_python ALL DEPENDS - ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model python_api_wheel ${MKL_DEPENDS}) +set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp paddle_pserver_main paddle_trainer paddle_merge_model ${MKL_DEPENDS}) +if(WITH_SWIG_PY) + list(APPEND paddle_python_deps python_api_wheel) +endif() +add_custom_target(paddle_python ALL DEPENDS ${paddle_python_deps}) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) From f168843e47df6cee8a81a30408ba4c2d092893fa Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 15 Aug 2017 06:59:05 +0000 Subject: [PATCH 50/92] fix gpu build error --- paddle/memory/CMakeLists.txt | 2 +- paddle/platform/CMakeLists.txt | 5 ++++- paddle/platform/device_context.cc | 8 ++++---- paddle/platform/device_context.h | 3 ++- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index 8035d93bfe..9cc4233e43 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -1,7 +1,7 @@ add_subdirectory(detail) cc_library(memory SRCS memory.cc) -cc_library(memcpy SRCS memcpy.cc DEPS device_context) +cc_library(memcpy SRCS memcpy.cc) cc_library(paddle_memory DEPS diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index c1ad60d160..acfc063973 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -16,5 +16,8 @@ ELSE() set(GPU_CTX_DEPS) ENDIF() -cc_library(device_context SRCS device_context.cc DEPS memory place eigen3 ${GPU_CTX_DEPS}) +# memcpy deoends on device_context, here add deps individually for +# avoiding cycle dependencies +cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator + system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info) diff --git a/paddle/platform/device_context.cc b/paddle/platform/device_context.cc index dc345bdd57..f92c15ae45 100644 --- a/paddle/platform/device_context.cc +++ b/paddle/platform/device_context.cc @@ -57,7 +57,7 @@ class EigenCudaStreamDevice : public 
Eigen::StreamInterface { } void* allocate(size_t num_bytes) const override { - paddle::memory::Alloc(place_, num_bytes); + return paddle::memory::Alloc(place_, num_bytes); } void deallocate(void* buffer) const override { @@ -86,7 +86,7 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { GPUPlace place_; const cudaStream_t* stream_; // not owned; const cudaDeviceProp* device_prop_; // not owned; - mutable char* scratch_; + mutable void* scratch_; mutable unsigned int* semaphore_; }; @@ -145,7 +145,7 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() { if (!cudnn_handle_) { SetDeviceId(place_.device); PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); - PADDLE_ENFORCE(dynload::cudnnSetStream(cudnnHandle_t, stream_)); + PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_)); } return cudnn_handle_; } @@ -160,7 +160,7 @@ curandGenerator_t CUDADeviceContext::curand_generator() { PADDLE_ENFORCE( dynload::curandSetPseudoRandomGeneratorSeed(curand_generator_, seed_)); - PADDLE_ENFORCE(dynload::curandSetStream(curandGenerator_t, stream_)); + PADDLE_ENFORCE(dynload::curandSetStream(curand_generator_, stream_)); } return curand_generator_; } diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h index b68e177c0a..c5042ae33e 100644 --- a/paddle/platform/device_context.h +++ b/paddle/platform/device_context.h @@ -52,6 +52,7 @@ class CPUDeviceContext : public DeviceContext { }; #ifndef PADDLE_ONLY_CPU +class EigenCudaStreamDevice; class CUDADeviceContext : public DeviceContext { public: @@ -92,7 +93,7 @@ class CUDADeviceContext : public DeviceContext { uint64_t seed_; // clang-format off - cudaStream_t stream_{nullptr} + cudaStream_t stream_{nullptr}; cudnnHandle_t cudnn_handle_{nullptr}; cublasHandle_t cublas_handle_{nullptr}; curandGenerator_t curand_generator_{nullptr}; From 2403045cbd57eb837d5ab82e2acc66767c1d3224 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 15 Aug 2017 07:03:54 +0000 Subject: [PATCH 51/92] refine device_context_test --- paddle/platform/device_context_test.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/platform/device_context_test.cc b/paddle/platform/device_context_test.cc index 65345c433c..8b764bdcd9 100644 --- a/paddle/platform/device_context_test.cc +++ b/paddle/platform/device_context_test.cc @@ -45,6 +45,7 @@ TEST(Device, CUDADeviceContext) { ASSERT_NE(nullptr, cublas_handle); curandGenerator_t curand_handle = device_context->curand_generator(); ASSERT_NE(nullptr, curand_handle); + ASSERT_NE(nullptr, device_context->stream()); delete device_context; } } From d08550fdd22453227e9a3f3f5e061c2849290304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=AD=A6=E6=AF=85?= Date: Tue, 15 Aug 2017 15:53:24 +0800 Subject: [PATCH 52/92] Large model train doc (#3445) * large model train note * update * update doc --- .../cluster_train/large_model_dist_train.md | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 doc/design/cluster_train/large_model_dist_train.md diff --git a/doc/design/cluster_train/large_model_dist_train.md b/doc/design/cluster_train/large_model_dist_train.md new file mode 100644 index 0000000000..0c4b5bc24c --- /dev/null +++ b/doc/design/cluster_train/large_model_dist_train.md @@ -0,0 +1,101 @@ +# Analysis of large model distributed training in Paddle + +***NOTE: This is only a note on how we implemented this scheme in V1, not a new design.*** + +## What is it + +We often encounter cases where the embedding layer parameters (sparse) are so large that we cannot store them in the
trainer's memory when training. So we need to put them on several servers, and fetch them row by row instead of fetching all of the parameters. + +## How to use + +Specify command-line arguments like `--loadsave_parameters_in_pserver=true --ports_num_for_sparse=1 --use_old_updater=1` when starting the paddle trainer, and also add something like `--ports_num_for_sparse=1 --pserver_num_threads=5` when starting the pserver processes. + +Accordingly, configure your embedding layers like: + +```python +SPARSE_REMOTE=True + +w1 = data_layer(name="w1", size=dict_size) +emb1 = embedding_layer(input=w1, size=32, param_attr=ParameterAttribute(sparse_update=SPARSE_REMOTE)) +w2 = data_layer(name="w2", size=dict_size) +emb2 = embedding_layer(input=w2, size=32, param_attr=ParameterAttribute(sparse_update=SPARSE_REMOTE)) +... +``` + +## Implementation details + +```c++ +enum MatType { + MAT_NORMAL, + MAT_NORMAL_SHARED, + MAT_VALUE_SHARED, + MAT_SPARSE_ROW_IDS, + MAT_SPARSE_ROW_AUTO_GROW, + MAT_CACHE_ROW, + MAT_SPARSE_ROW, + MAT_SPARSE_ROW_PREFETCH, + MAT_SPARSE_ROW_PREFETCH_FULL_SIZE, +}; +``` + +`MAT_SPARSE_ROW_PREFETCH` is what we use when configured to fetch only rows of the matrix when training. + +In `trainer_internal.cpp:L93 trainOneBatch`: + +```c++ + if (config_->getOptConfig().use_sparse_remote_updater()) { + REGISTER_TIMER("prefetch"); + gradientMachine_->prefetch(inArgs); + parameterUpdater_->getParametersRemote(); + } +``` + +When doing the actual network forward and backward, at the beginning of each batch, the trainer will try to download one row of data from the pserver. + +In `trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`: + +```c++ +if (fullSize) { + ... +} else { +getParams = [&] { + parameterClient_->getParameterSparse( + /* recvParameterType= */ PARAMETER_VALUE, sendBackParameterType); +}; +applyL1 = [](Parameter& para, real decayRate) { + para.getMat(PARAMETER_VALUE)->applyL1(/*lr=*/1.0f, decayRate); +}; +} +``` + +Calling `parameterClient_->getParameterSparse` will do a remote call to the pserver's `getParameterSparse`: + +```c++ +void ParameterServer2::getParameterSparse(const SendParameterRequest& request, + std::vector& inputBuffers, + SendParameterResponse* response, + std::vector* outputBuffers) { + (void)inputBuffers; + auto& buffer = *readWriteBuffer_; + size_t numReals = 0; + for (const auto& block : request.blocks()) { + numReals += getParameterConfig(block).dims(1); + } + buffer.resize(numReals); + + VLOG(3) << "pserver: getParameterSparse, numReals=" << numReals; + + ReadLockGuard guard(parameterMutex_); + size_t offset = 0; + for (const auto& block : request.blocks()) { + size_t width = getParameterConfig(block).dims(1); + Buffer buf = {buffer.data() + offset, width}; + int type = request.send_back_parameter_type(); + sendBackParameterSparse(block, type, response, &buf, width, outputBuffers); + offset += width; + } +} +``` + +`getParameterConfig(block).dims(1)` returns the width of the current "parameter block" (a shard of the parameter object), +then the `getParameterSparse` remote call returns only one row of data to the client.
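To make the memory saving concrete, the following is a rough, illustrative sketch. The vocabulary size and the number of distinct word ids per batch are made-up numbers; only the embedding width of 32 matches the example configuration above.

```python
# Illustrative numbers only: compare holding the full embedding table in
# trainer memory with prefetching just the rows touched by one batch.
dict_size = 10 * 1000 * 1000   # assumed vocabulary size
emb_width = 32                 # embedding width, as in the config example
bytes_per_real = 4             # float32

full_table_bytes = dict_size * emb_width * bytes_per_real
rows_in_batch = 50 * 1000      # assumed distinct word ids in one batch
prefetch_bytes = rows_in_batch * emb_width * bytes_per_real

print("full table  : %.1f MB" % (full_table_bytes / 2.0 ** 20))
print("one prefetch: %.1f MB" % (prefetch_bytes / 2.0 ** 20))
```

With these assumed numbers the full table is roughly 1.2 GB while a single prefetch is only a few megabytes, which is why fetching parameters row by row lets the trainer work with embedding tables that would not fit in its memory.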
From 881c5d00d0e44cd957eeea2532c556a0eef8117a Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 15 Aug 2017 16:08:11 +0800 Subject: [PATCH 53/92] remove device_context deps in memcpy --- paddle/memory/memcpy.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/paddle/memory/memcpy.cc b/paddle/memory/memcpy.cc index aaab1142ca..a19a3e3675 100644 --- a/paddle/memory/memcpy.cc +++ b/paddle/memory/memcpy.cc @@ -16,8 +16,6 @@ limitations under the License. */ #include // for memcpy -#include "paddle/platform/device_context.h" - namespace paddle { namespace memory { From 9eaef75397926819294edda04dbed34aa069f5f4 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Tue, 15 Aug 2017 17:44:08 +0800 Subject: [PATCH 54/92] RNN backward create (#3490) * insert rnn's backward into Backward() * add device_context into backward_test --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/backward.cc | 17 +++++++++++++++++ paddle/operators/recurrent_op.h | 4 ++-- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 0398526024..68304c9fc8 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -38,7 +38,7 @@ add_custom_command(TARGET framework_py_proto POST_BUILD WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) cc_library(backward SRCS backward.cc DEPS net_op) -cc_test(backward_test SRCS backward_test.cc DEPS backward) +cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context) if(WITH_PYTHON) cc_library(paddle_pybind SHARED diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 83b7e4cdac..c226e4e3d2 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -17,6 +17,7 @@ #include #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" +#include "paddle/operators/recurrent_op.h" namespace paddle { namespace framework { @@ -178,6 +179,22 @@ std::shared_ptr BackwardRecursive( return false; }); + // process recurrent gradient op as a special operator. + if (forwardOp.Type() == "recurrent_op") { + // NOTE clean up cycle call somewhere (RNN's stepnet constains itself), or + // this will result in infinite loop. 
+ const auto& rnnop = + *static_cast(&forwardOp); + auto rnn_grad_op = + static_cast(grad_op.get()); + const auto& stepnet_op = + *static_cast(&rnnop.stepnet()); + // create stepnet's gradient op + auto grad_stepnet = BackwardRecursive(stepnet_op, no_grad_names, uniq_id); + rnn_grad_op->set_stepnet( + std::static_pointer_cast(grad_stepnet)); + } + if (net->ops_.empty()) { // Current no aux op is added to network return grad_op; } diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index caca644c96..171a0bd2ae 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -127,7 +127,7 @@ class RecurrentOp final : public framework::OperatorBase { } void set_stepnet(std::shared_ptr net) { stepnet_ = net; } - const NetOp* stepnet() const { return stepnet_.get(); } + const NetOp& stepnet() const { return *stepnet_; } static const rnn::ArgumentName kArgName; @@ -158,7 +158,7 @@ class RecurrentGradientOp final : public framework::OperatorBase { static const rnn::ArgumentName kArgName; void set_stepnet(const std::shared_ptr& net) { stepnet_ = net; } - const NetOp* stepnet() const { return stepnet_.get(); } + const NetOp& stepnet() const { return *stepnet_; } private: RecurrentGradientAlgorithm alg_; From 79a336b78f707ba04076e3130e7b7c0d87a484a7 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 15 Aug 2017 18:39:28 +0800 Subject: [PATCH 55/92] add shared warpctc lib in whl --- python/setup.py.in | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/setup.py.in b/python/setup.py.in index 287442e013..db15f6d950 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -31,7 +31,9 @@ paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage', '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'] paddle_rt_lib_dir = 'local/lib' -paddle_rt_libs = [] if '${MKL_SHARED_LIBS}'== '' else '${MKL_SHARED_LIBS}'.split(';') +paddle_rt_libs = ['${WARPCTC_LIBRARIES}'] +if '${MKL_SHARED_LIBS}'!= '': + paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';') setup(name='paddlepaddle', version='${PADDLE_VERSION}', From 7bc60b02737ba3695997086ac96d6915b1acb3f9 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 15 Aug 2017 14:21:35 -0700 Subject: [PATCH 56/92] Move OpRegistry functions to .cc file and move OpMaker to Op module --- paddle/framework/op_registry.cc | 46 ++++++++++- paddle/framework/op_registry.h | 138 ++------------------------------ paddle/framework/operator.cc | 38 +++++++++ paddle/framework/operator.h | 68 ++++++++++++++++ 4 files changed, 156 insertions(+), 134 deletions(-) diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc index 1caa02a2a1..f801f970f2 100644 --- a/paddle/framework/op_registry.cc +++ b/paddle/framework/op_registry.cc @@ -17,5 +17,49 @@ limitations under the License. 
*/ #include namespace paddle { -namespace framework {} // namespace framework +namespace framework { + +std::shared_ptr OpRegistry::CreateOp(const std::string& type, + const VarNameMap& inputs, + const VarNameMap& outputs, + AttributeMap attrs) { + auto it = op_info_map().find(type); + PADDLE_ENFORCE(it != op_info_map().end(), + "Operator '%s' has not been registered.", type); + it->second.checker_->Check(attrs); + auto op = it->second.creator_(type, inputs, outputs, attrs); + return std::shared_ptr(op); +} + +std::shared_ptr OpRegistry::CreateOp(const OpDesc& op_desc) { + VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); + VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); + AttributeMap attrs; + for (auto& attr : op_desc.attrs()) { + attrs[attr.name()] = GetAttrValue(attr); + } + + return CreateOp(op_desc.type(), inputs, outputs, attrs); +} + +OperatorBase::VarNameMap OpRegistry::ConvertOpDescVarsToVarNameMap( + const google::protobuf::RepeatedPtrField& op_desc_vars) { + VarNameMap ret_val; + for (auto& var : op_desc_vars) { + auto& var_names = ret_val[var.parameter()]; + auto& var_names_in_proto = var.arguments(); + var_names.reserve(static_cast(var_names_in_proto.size())); + std::copy(var_names_in_proto.begin(), var_names_in_proto.end(), + std::back_inserter(var_names)); + } + return ret_val; +} + +std::shared_ptr OpRegistry::CreateGradOp(const OperatorBase& op) { + PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); + std::shared_ptr grad_op(BuildGradOp(&op)); + return grad_op; +} + +} // namespace framework } // namespace paddle diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 120f4ede6b..cc2234d50e 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -29,103 +29,6 @@ limitations under the License. */ namespace paddle { namespace framework { -// this class not only make proto but also init attribute checkers. -class OpProtoAndCheckerMaker { - public: - OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : proto_(proto), op_checker_(op_checker) {} - - ~OpProtoAndCheckerMaker() { - PADDLE_ENFORCE(validated_, "should call Validate after build"); - } - - void Validate() { - validated_ = true; - CheckNoDuplicatedInOutAttrs(); - } - - protected: - struct VariableBuilder { - OpProto::Var* var_; - - VariableBuilder& AsDuplicable() { - var_->set_duplicable(true); - return *this; - } - - VariableBuilder& AsIntermediate() { - var_->set_intermediate(true); - return *this; - } - - // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it - // means that input/output is not needed when calculate gradient. It does - // not mean no gradient when backward. It should be changed soon. 
- VariableBuilder& AsNoGradient() { - var_->set_no_gradient(true); - return *this; - } - }; - - VariableBuilder AddInput(const std::string& name, - const std::string& comment) { - auto* input = proto_->add_inputs(); - input->set_name(name); - input->set_comment(comment); - return VariableBuilder{input}; - } - - VariableBuilder AddOutput(const std::string& name, - const std::string& comment) { - auto* output = proto_->add_outputs(); - output->set_name(name); - output->set_comment(comment); - return VariableBuilder{output}; - } - - template - TypedAttrChecker& AddAttr(const std::string& name, - const std::string& comment, - bool generated = false) { - auto* attr = proto_->add_attrs(); - attr->set_name(name); - attr->set_comment(comment); - attr->set_generated(generated); - attr->set_type(AttrTypeID()); - return op_checker_->AddAttrChecker(name); - } - - void AddComment(const std::string& comment) { proto_->set_comment(comment); } - - private: - void CheckNoDuplicatedInOutAttrs() { - std::unordered_set names; - auto checker = [&](const std::string& name) { - PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name); - names.insert(name); - }; - for (auto& attr : proto_->attrs()) { - checker(attr.name()); - } - for (auto& input : proto_->inputs()) { - checker(input.name()); - } - for (auto& output : proto_->outputs()) { - checker(output.name()); - } - } - - OpProto* proto_; - OpAttrChecker* op_checker_; - bool validated_{false}; -}; - -class NOPMaker : public OpProtoAndCheckerMaker { - public: - NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) {} -}; - class OpRegistry { using VarNameMap = OperatorBase::VarNameMap; using OpCreator = std::function CreateOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, - AttributeMap attrs) { - auto it = op_info_map().find(type); - PADDLE_ENFORCE(it != op_info_map().end(), - "Operator '%s' has not been registered.", type); - it->second.checker_->Check(attrs); - auto op = it->second.creator_(type, inputs, outputs, attrs); - return std::shared_ptr(op); - } - - static VarNameMap ConvertOpDescVarsToVarNameMap( - const google::protobuf::RepeatedPtrField& op_desc_vars) { - VarNameMap ret_val; - for (auto& var : op_desc_vars) { - auto& var_names = ret_val[var.parameter()]; - auto& var_names_in_proto = var.arguments(); - var_names.reserve(static_cast(var_names_in_proto.size())); - std::copy(var_names_in_proto.begin(), var_names_in_proto.end(), - std::back_inserter(var_names)); - } - return ret_val; - } + AttributeMap attrs); - static std::shared_ptr CreateOp(const OpDesc& op_desc) { - VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); - VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); - AttributeMap attrs; - for (auto& attr : op_desc.attrs()) { - attrs[attr.name()] = GetAttrValue(attr); - } + static std::shared_ptr CreateOp(const OpDesc& op_desc); - return CreateOp(op_desc.type(), inputs, outputs, attrs); - } + static VarNameMap ConvertOpDescVarsToVarNameMap( + const google::protobuf::RepeatedPtrField& op_desc_vars); - static std::shared_ptr CreateGradOp(const OperatorBase& op) { - PADDLE_ENFORCE(!op.IsNetOp(), - "Use framework::Backward to get backward ops"); - std::shared_ptr grad_op(BuildGradOp(&op)); - return grad_op; - } + static std::shared_ptr CreateGradOp(const OperatorBase& op); static std::unordered_map& op_info_map() { static std::unordered_map op_info_map_; diff --git a/paddle/framework/operator.cc 
b/paddle/framework/operator.cc index 0daf12e7f5..eadd8f3316 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -164,5 +164,43 @@ std::vector OperatorBase::OutputVars(bool has_intermediate) const { return ret_val; } +void OpProtoAndCheckerMaker::Validate() { + validated_ = true; + CheckNoDuplicatedInOutAttrs(); +} + +OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput( + const std::string& name, const std::string& comment) { + auto* input = proto_->add_inputs(); + input->set_name(name); + input->set_comment(comment); + return OpProtoAndCheckerMaker::VariableBuilder{input}; +} + +OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput( + const std::string& name, const std::string& comment) { + auto* output = proto_->add_outputs(); + output->set_name(name); + output->set_comment(comment); + return OpProtoAndCheckerMaker::VariableBuilder{output}; +} + +void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() { + std::unordered_set names; + auto checker = [&](const std::string& name) { + PADDLE_ENFORCE(!names.count(name), "[%s] is duplicated", name); + names.insert(name); + }; + for (auto& attr : proto_->attrs()) { + checker(attr.name()); + } + for (auto& input : proto_->inputs()) { + checker(input.name()); + } + for (auto& output : proto_->outputs()) { + checker(output.name()); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 60d4f06c7e..2c8620a7ce 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -138,6 +138,74 @@ class NOP : public OperatorBase { const platform::DeviceContext& dev_ctx) const override {} }; +// this class not only make proto but also init attribute checkers. +class OpProtoAndCheckerMaker { + public: + OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) + : proto_(proto), op_checker_(op_checker) {} + + ~OpProtoAndCheckerMaker() { + PADDLE_ENFORCE(validated_, "should call Validate after build"); + } + + void Validate(); + + protected: + struct VariableBuilder { + OpProto::Var* var_; + + VariableBuilder& AsDuplicable() { + var_->set_duplicable(true); + return *this; + } + + VariableBuilder& AsIntermediate() { + var_->set_intermediate(true); + return *this; + } + + // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it + // means that input/output is not needed when calculate gradient. It does + // not mean no gradient when backward. It should be changed soon. 
+ VariableBuilder& AsNoGradient() { + var_->set_no_gradient(true); + return *this; + } + }; + + VariableBuilder AddInput(const std::string& name, const std::string& comment); + + VariableBuilder AddOutput(const std::string& name, + const std::string& comment); + + template + TypedAttrChecker& AddAttr(const std::string& name, + const std::string& comment, + bool generated = false) { + auto* attr = proto_->add_attrs(); + attr->set_name(name); + attr->set_comment(comment); + attr->set_generated(generated); + attr->set_type(AttrTypeID()); + return op_checker_->AddAttrChecker(name); + } + + void AddComment(const std::string& comment) { proto_->set_comment(comment); } + + private: + void CheckNoDuplicatedInOutAttrs(); + + OpProto* proto_; + OpAttrChecker* op_checker_; + bool validated_{false}; +}; + +class NOPMaker : public OpProtoAndCheckerMaker { + public: + NOPMaker(framework::OpProto* proto, framework::OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) {} +}; + class InferShapeContext { public: InferShapeContext(const OperatorBase& op, const Scope& scope) From c307ee303b982c97ee66f91981f81c606c62ec63 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Wed, 16 Aug 2017 11:31:21 +0800 Subject: [PATCH 57/92] clang format with version check (#3513) * add clang-format with version check 3.8 * improve doc --- .clang_format.hook | 15 +++++++++++++++ .pre-commit-config.yaml | 4 ++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100755 .clang_format.hook diff --git a/.clang_format.hook b/.clang_format.hook new file mode 100755 index 0000000000..1d92821686 --- /dev/null +++ b/.clang_format.hook @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +readonly VERSION="3.8" + +version=$(clang-format -version) + +if ! [[ $version == *"$VERSION"* ]]; then + echo "clang-format version check failed." + echo "a version contains '$VERSION' is needed, but get '$version'" + echo "you can install the right version, and make an soft-link to '\$PATH' env" + exit -1 +fi + +clang-format $@ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bb8c88787d..a772125df6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,10 +19,10 @@ - id: end-of-file-fixer - repo: local hooks: - - id: clang-format + - id: clang-format-with-version-check name: clang-format description: Format files with ClangFormat. - entry: clang-format -i + entry: ./.clang_format.hook -i language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$ - repo: https://github.com/PaddlePaddle/pre-commit-golang From 13c20ad39e23f0d377bab05c7fea0621d46abd07 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 16 Aug 2017 11:53:46 +0800 Subject: [PATCH 58/92] remove --default-stream per-thread nvcc flag --- cmake/flags.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 47bb83b00a..ff246b2eb4 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -153,7 +153,7 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF) # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc. # So, don't set these flags here. 
-LIST(APPEND CUDA_NVCC_FLAGS -std=c++11 --default-stream per-thread) +LIST(APPEND CUDA_NVCC_FLAGS -std=c++11) LIST(APPEND CUDA_NVCC_FLAGS --use_fast_math) if(CMAKE_BUILD_TYPE STREQUAL "Debug") From 0d2ab5e993c9dd16ada677a8ea9de563553a7428 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 16 Aug 2017 11:50:11 +0800 Subject: [PATCH 59/92] use param header to save mkldnn format info --- doc/design/mkldnn/README.MD | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD index e956994431..2929514b08 100644 --- a/doc/design/mkldnn/README.MD +++ b/doc/design/mkldnn/README.MD @@ -101,6 +101,7 @@ if use_mkldnn 5. 在**Argument**里添加两个`MkldnnMatrixPtr`,取名为`mkldnnValue`和`mkldnnGrad`,用于存放`MkldnnLayer`会用到的memory buffer。 并且添加函数cvt(会修改为一个更加合适的函数名),用于处理"CPU device"和"MKL-DNN device"之间memory的相互转化。 6. 在父类`Layer`中的`getOutput`函数中添加一段逻辑,用于判断`deviceId`,并针对device在MKL-DNN和CPU之间不统一的情况,做一个前期转换。 也就是调用`Argument`的cvt函数把output统一到需要的device上。 7. 在原来的`FLAGS`中添加一个`use_mkldnn`的flag,用于选择是否使用MKL-DNN的相关功能。 +8. 关于MKLDNN参数的保存。由于MKLDNN参数的格式与PaddlePaddle原有的格式存在不一样的情况,所以需要在保存参数时同时保存该格式信息。目前准备扩展`Header`里面的`int32_t version; // = 0, file format version`信息。这个`version`值,不管是在v1还是在v2里面,一直保存的是0。所以可以充分利用这个信息,定义一个枚举处理所有MKLDNN的参数格式,`MKLDNNLayer`就可以知道得到的参数是哪种格式的了。只不过目前v2里面是写的固定值0,而不是保存的`Header`本身,这一点相信v2未来应该会优化的。 ## References From 137a05eb752f33d2529437c08bf6e58a7010c03d Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 16 Aug 2017 13:53:07 +0800 Subject: [PATCH 60/92] update --- doc/design/mkldnn/README.MD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/mkldnn/README.MD b/doc/design/mkldnn/README.MD index 2929514b08..fe8da907d9 100644 --- a/doc/design/mkldnn/README.MD +++ b/doc/design/mkldnn/README.MD @@ -101,7 +101,7 @@ if use_mkldnn 5. 在**Argument**里添加两个`MkldnnMatrixPtr`,取名为`mkldnnValue`和`mkldnnGrad`,用于存放`MkldnnLayer`会用到的memory buffer。 并且添加函数cvt(会修改为一个更加合适的函数名),用于处理"CPU device"和"MKL-DNN device"之间memory的相互转化。 6. 在父类`Layer`中的`getOutput`函数中添加一段逻辑,用于判断`deviceId`,并针对device在MKL-DNN和CPU之间不统一的情况,做一个前期转换。 也就是调用`Argument`的cvt函数把output统一到需要的device上。 7. 在原来的`FLAGS`中添加一个`use_mkldnn`的flag,用于选择是否使用MKL-DNN的相关功能。 -8. 关于MKLDNN参数的保存。由于MKLDNN参数的格式与PaddlePaddle原有的格式存在不一样的情况,所以需要在保存参数时同时保存该格式信息。目前准备扩展`Header`里面的`int32_t version; // = 0, file format version`信息。这个`version`值,不管是在v1还是在v2里面,一直保存的是0。所以可以充分利用这个信息,定义一个枚举处理所有MKLDNN的参数格式,`MKLDNNLayer`就可以知道得到的参数是哪种格式的了。只不过目前v2里面是写的固定值0,而不是保存的`Header`本身,这一点相信v2未来应该会优化的。 +8. 关于MKLDNN参数的保存。由于MKLDNN参数的格式与PaddlePaddle原有的格式存在不一样的情况,所以需要在保存参数时同时保存该格式信息。目前准备扩展[Header](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/parameter/Parameter.h#L247)里面的`int32_t version`。这个值不管是在v1还是在v2里面,一直保存的是0,所以可以充分利用这个信息,定义一个枚举处理所有MKLDNN的参数格式,从而`MKLDNNLayer`就可以从输入的参数中获取需要的格式信息。 ## References From 29d892c13cf88c7659647cec532169caa7abd2b9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 14:19:38 +0800 Subject: [PATCH 61/92] Add Clone Method For OperatorBase * Clone method will create a new object instance, which is as same as itself. 
* This is the first step to remove shared_ptr for OperatorBase --- paddle/framework/op_registry.h | 15 +++++++++++++-- paddle/framework/operator.h | 14 ++++++++++---- paddle/framework/operator_test.cc | 19 +++++++++++++++++++ paddle/operators/net_op.cc | 7 +++++++ paddle/operators/net_op.h | 13 +++++++++++++ paddle/operators/net_op_test.cc | 17 +++++++++++++++++ paddle/operators/recurrent_op.h | 22 ++++++++++++++++++---- 7 files changed, 97 insertions(+), 10 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 3b793628aa..b5b4668074 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -271,7 +271,13 @@ class OpKernelRegistrar : public Registrar { #define REGISTER_OP(op_type, op_class, op_maker_class) \ STATIC_ASSERT_GLOBAL_NAMESPACE( \ __reg_op__##op_type, "REGISTER_OP must be called in global namespace"); \ - static ::paddle::framework::OpRegistrar \ + class _OpClass_##op_type##_ : public op_class { \ + public: \ + DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ + DEFINE_OP_CTOR(_OpClass_##op_type##_, op_class); \ + }; \ + static ::paddle::framework::OpRegistrar<_OpClass_##op_type##_, \ + op_maker_class> \ __op_registrar_##op_type##__(#op_type); \ int TouchOpRegistrar_##op_type() { \ __op_registrar_##op_type##__.Touch(); \ @@ -285,7 +291,12 @@ class OpKernelRegistrar : public Registrar { STATIC_ASSERT_GLOBAL_NAMESPACE( \ __reg_gradient_op__##op_type##_##grad_op_type, \ "REGISTER_GRADIENT_OP must be called in global namespace"); \ - static ::paddle::framework::GradOpRegistrar \ + class _OpGradClass_##op_type##_ : public grad_op_class { \ + public: \ + DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_); \ + DEFINE_OP_CTOR(_OpGradClass_##op_type##_, grad_op_class); \ + }; \ + static ::paddle::framework::GradOpRegistrar<_OpGradClass_##op_type##_> \ __op_gradient_registrar_##op_type##_##grad_op_type##__(#op_type, \ #grad_op_type); \ int TouchOpGradientRegistrar_##op_type() { \ diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 4a72ced6ce..9203247866 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -69,10 +69,6 @@ class OperatorBase { OperatorBase(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, const AttributeMap& attrs); - OperatorBase(const OperatorBase& o) = delete; - OperatorBase& operator=(const OperatorBase& o) = delete; - OperatorBase(OperatorBase&& o) = delete; - virtual ~OperatorBase() {} template @@ -115,6 +111,8 @@ class OperatorBase { std::string Type() const { return type_; } const AttributeMap& Attrs() const { return attrs_; } + virtual OperatorBase* Clone() const = 0; + public: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: @@ -129,6 +127,14 @@ class OperatorBase { AttributeMap attrs_; }; +#define DEFINE_OP_CLONE_METHOD(CLS) \ + OperatorBase* Clone() const final { return new CLS(*this); } + +#define DEFINE_OP_CTOR(CLS, PARENT_CLS) \ + CLS(const std::string& type, const VarNameMap& inputs, \ + const VarNameMap& outputs, const paddle::framework::AttributeMap& attrs) \ + : PARENT_CLS(type, inputs, outputs, attrs) {} + class InferShapeContext { public: InferShapeContext(const OperatorBase& op, const Scope& scope) diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index 6804841587..ceba7f5e6e 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -242,3 +242,22 @@ TEST(OpKernel, multi_inputs) { auto op = 
paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_device_context); } + +class OperatorClone : public paddle::framework::OperatorBase { + public: + DEFINE_OP_CLONE_METHOD(OperatorClone); + OperatorClone(const std::string& type, const VarNameMap& inputs, + const VarNameMap& outputs, + const paddle::framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + void InferShape(const paddle::framework::Scope& scope) const override {} + void Run(const paddle::framework::Scope& scope, + const paddle::platform::DeviceContext& dev_ctx) const override {} +}; + +TEST(Operator, Clone) { + OperatorClone a("ABC", {}, {}, {}); + auto* b = a.Clone(); + ASSERT_EQ(a.Type(), b->Type()); + delete b; +} \ No newline at end of file diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc index 1d1b290440..896550f9d0 100644 --- a/paddle/operators/net_op.cc +++ b/paddle/operators/net_op.cc @@ -87,5 +87,12 @@ NetOp::NetOp(const std::string& type, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} +framework::OperatorBase* NetOp::Clone() const { + PADDLE_ENFORCE( + add_op_done_, + "Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone"); + return new NetOp(*this); +} + } // namespace operators } // namespace paddle diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 4a3408c158..deee543065 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -41,6 +41,18 @@ class NetOp : public framework::OperatorBase { NetOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, const framework::AttributeMap& attrs); + NetOp(const NetOp& o) + : framework::OperatorBase( + static_cast(o)) { + this->ops_.reserve(o.ops_.size()); + std::transform(o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), + [](const std::shared_ptr& op) + -> std::shared_ptr { + return std::shared_ptr(op->Clone()); + }); + this->CompleteAddOp(); + } + /** * Infer all the operators' input and output variables' shapes, will be called * before every mini-batch @@ -97,6 +109,7 @@ class NetOp : public framework::OperatorBase { bool IsNetOp() const override; std::vector OutputVars(bool has_intermediate) const override; + framework::OperatorBase* Clone() const override; std::vector> ops_; diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index f7aa56262e..40e43f46df 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -13,6 +13,7 @@ static int run_cnt = 0; class TestOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; + DEFINE_OP_CLONE_METHOD(TestOp); void InferShape(const Scope& scope) const override { ++infer_shape_cnt; } void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const override { @@ -23,6 +24,7 @@ class TestOp : public framework::OperatorBase { class EmptyOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; + DEFINE_OP_CLONE_METHOD(EmptyOp); void InferShape(const Scope& scope) const override {} void Run(const Scope& scope, const DeviceContext& dev_ctx) const override {} }; @@ -77,5 +79,20 @@ TEST(NetOp, insert_op) { ASSERT_EQ(3UL, net.ops_.size()); } +TEST(NetOp, Clone) { + NetOp net; + net.AddOp(std::shared_ptr(new EmptyOp{"empty", {}, {}, {}})); + net.AddOp(std::shared_ptr(new EmptyOp{"empty2", {}, {}, {}})); + net.CompleteAddOp(true); + auto* new_net_op = net.Clone(); + ASSERT_NE(new_net_op, nullptr); + 
ASSERT_TRUE(new_net_op->IsNetOp()); + auto* new_net = static_cast(new_net_op); + ASSERT_EQ(2, new_net->ops_.size()); + ASSERT_EQ(new_net->ops_[0]->Type(), "empty"); + ASSERT_EQ(new_net->ops_[1]->Type(), "empty2"); + delete new_net; +} + } // namespace operators } // namespace paddle diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 8f4f2444d8..cc40eff0cf 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -99,13 +99,20 @@ class RecurrentGradientAlgorithm { mutable size_t seq_len_; }; -class RecurrentOp final : public framework::OperatorBase { +class RecurrentOp : public framework::OperatorBase { public: RecurrentOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, const framework::AttributeMap& attrs); + + RecurrentOp(const RecurrentOp& o) + : framework::OperatorBase( + static_cast(o)) { + // TODO(yuyang18): Implement copy ctor well. + PADDLE_THROW("Not implemented"); + } /** - * InferShape must be called before Run. - */ + * InferShape must be called before Run. + */ void InferShape(const framework::Scope& scope) const override { alg_.InferShape(scope); } @@ -121,12 +128,19 @@ class RecurrentOp final : public framework::OperatorBase { RecurrentAlgorithm alg_; }; -class RecurrentGradientOp final : public framework::OperatorBase { +class RecurrentGradientOp : public framework::OperatorBase { public: RecurrentGradientOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, const framework::AttributeMap& attrs); + RecurrentGradientOp(const RecurrentGradientOp& o) + : framework::OperatorBase( + static_cast(o)) { + // TODO(yuyang18): Implement Copy ctor. + PADDLE_THROW("Not Implemented"); + } + /** * InferShape must be called before Run. */ From 3e52343dc1c31d0c23a6fdcdee0c7c0492310014 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 14:24:10 +0800 Subject: [PATCH 62/92] Add comments --- paddle/framework/operator.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 9203247866..9e4d0d5e39 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -111,6 +111,8 @@ class OperatorBase { std::string Type() const { return type_; } const AttributeMap& Attrs() const { return attrs_; } + // Return a new operator instance, which is as same as this. + // NOTE: It is caller's responsibility to delete that operator instance. virtual OperatorBase* Clone() const = 0; public: @@ -127,9 +129,16 @@ class OperatorBase { AttributeMap attrs_; }; +// Macro for define a clone method. +// If you are writing an kernel operator, `Clone` will be defined when you +// register it. #define DEFINE_OP_CLONE_METHOD(CLS) \ OperatorBase* Clone() const final { return new CLS(*this); } +// Macro for define a default constructor for Operator. +// You can also use +// using PARENT_CLASS::PARENT_CLASS; +// to use parent's constructor. 
#define DEFINE_OP_CTOR(CLS, PARENT_CLS) \ CLS(const std::string& type, const VarNameMap& inputs, \ const VarNameMap& outputs, const paddle::framework::AttributeMap& attrs) \ From a0d77533f01c5da0fa811d4cc91235f5610f745f Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 14:49:18 +0800 Subject: [PATCH 63/92] Rename Ctor -> Constructor Make code more clearer --- paddle/framework/op_registry.h | 4 ++-- paddle/framework/operator.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index b5b4668074..c0654b375d 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -274,7 +274,7 @@ class OpKernelRegistrar : public Registrar { class _OpClass_##op_type##_ : public op_class { \ public: \ DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \ - DEFINE_OP_CTOR(_OpClass_##op_type##_, op_class); \ + DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \ }; \ static ::paddle::framework::OpRegistrar<_OpClass_##op_type##_, \ op_maker_class> \ @@ -294,7 +294,7 @@ class OpKernelRegistrar : public Registrar { class _OpGradClass_##op_type##_ : public grad_op_class { \ public: \ DEFINE_OP_CLONE_METHOD(_OpGradClass_##op_type##_); \ - DEFINE_OP_CTOR(_OpGradClass_##op_type##_, grad_op_class); \ + DEFINE_OP_CONSTRUCTOR(_OpGradClass_##op_type##_, grad_op_class); \ }; \ static ::paddle::framework::GradOpRegistrar<_OpGradClass_##op_type##_> \ __op_gradient_registrar_##op_type##_##grad_op_type##__(#op_type, \ diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 9e4d0d5e39..4a1dee6fb0 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -118,7 +118,7 @@ class OperatorBase { public: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: - // I (Inputs) + // I (Inputs)opear // O (Outputs) // OG (Output Gradients) VarNameMap inputs_; @@ -139,7 +139,7 @@ class OperatorBase { // You can also use // using PARENT_CLASS::PARENT_CLASS; // to use parent's constructor. -#define DEFINE_OP_CTOR(CLS, PARENT_CLS) \ +#define DEFINE_OP_CONSTRUCTOR(CLS, PARENT_CLS) \ CLS(const std::string& type, const VarNameMap& inputs, \ const VarNameMap& outputs, const paddle::framework::AttributeMap& attrs) \ : PARENT_CLS(type, inputs, outputs, attrs) {} From 1425387570d5559ad0e82bd690b0fcc424911ca1 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 15:52:48 +0800 Subject: [PATCH 64/92] Using unique_ptr instead of raw ptr Fit google C++ style --- paddle/framework/operator.h | 10 ++++++---- paddle/framework/operator_test.cc | 3 +-- paddle/operators/net_op.cc | 6 +++--- paddle/operators/net_op.h | 3 ++- paddle/operators/net_op_test.cc | 5 ++--- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 4a1dee6fb0..9e8aef6f85 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -112,8 +112,8 @@ class OperatorBase { const AttributeMap& Attrs() const { return attrs_; } // Return a new operator instance, which is as same as this. - // NOTE: It is caller's responsibility to delete that operator instance. - virtual OperatorBase* Clone() const = 0; + // Use unique_ptr to prevent caller forget to delete this pointer. + virtual std::unique_ptr Clone() const = 0; public: std::string type_; @@ -132,8 +132,10 @@ class OperatorBase { // Macro for define a clone method. // If you are writing an kernel operator, `Clone` will be defined when you // register it. 
-#define DEFINE_OP_CLONE_METHOD(CLS) \ - OperatorBase* Clone() const final { return new CLS(*this); } +#define DEFINE_OP_CLONE_METHOD(CLS) \ + std::unique_ptr Clone() const final { \ + return std::unique_ptr(new CLS(*this)); \ + } // Macro for define a default constructor for Operator. // You can also use diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc index ceba7f5e6e..8836217126 100644 --- a/paddle/framework/operator_test.cc +++ b/paddle/framework/operator_test.cc @@ -257,7 +257,6 @@ class OperatorClone : public paddle::framework::OperatorBase { TEST(Operator, Clone) { OperatorClone a("ABC", {}, {}, {}); - auto* b = a.Clone(); + auto b = a.Clone(); ASSERT_EQ(a.Type(), b->Type()); - delete b; } \ No newline at end of file diff --git a/paddle/operators/net_op.cc b/paddle/operators/net_op.cc index 896550f9d0..77eb07e2f9 100644 --- a/paddle/operators/net_op.cc +++ b/paddle/operators/net_op.cc @@ -85,13 +85,13 @@ NetOp::NetOp(const std::string& type, const framework::OperatorBase::VarNameMap& inputs, const framework::OperatorBase::VarNameMap& outputs, const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} + : framework::OperatorBase(type, inputs, outputs, attrs) {} -framework::OperatorBase* NetOp::Clone() const { +std::unique_ptr NetOp::Clone() const { PADDLE_ENFORCE( add_op_done_, "Must clone a sealed NetOp, invoke Net::CompleteAddOp before clone"); - return new NetOp(*this); + return std::unique_ptr(new NetOp(*this)); } } // namespace operators diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index deee543065..743f0e67db 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -109,7 +109,8 @@ class NetOp : public framework::OperatorBase { bool IsNetOp() const override; std::vector OutputVars(bool has_intermediate) const override; - framework::OperatorBase* Clone() const override; + + std::unique_ptr Clone() const override; std::vector> ops_; diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index 40e43f46df..6d6f8bd354 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -84,14 +84,13 @@ TEST(NetOp, Clone) { net.AddOp(std::shared_ptr(new EmptyOp{"empty", {}, {}, {}})); net.AddOp(std::shared_ptr(new EmptyOp{"empty2", {}, {}, {}})); net.CompleteAddOp(true); - auto* new_net_op = net.Clone(); + auto new_net_op = net.Clone(); ASSERT_NE(new_net_op, nullptr); ASSERT_TRUE(new_net_op->IsNetOp()); - auto* new_net = static_cast(new_net_op); + auto* new_net = static_cast(new_net_op.get()); ASSERT_EQ(2, new_net->ops_.size()); ASSERT_EQ(new_net->ops_[0]->Type(), "empty"); ASSERT_EQ(new_net->ops_[1]->Type(), "empty2"); - delete new_net; } } // namespace operators From 0f8688192cfd4892c379c5f994a2d7149fa3c63d Mon Sep 17 00:00:00 2001 From: Yancey Date: Wed, 16 Aug 2017 16:09:09 +0800 Subject: [PATCH 65/92] Fix invalid paddle binary file path (#3421) Fix invalid paddle executable file path with pip install --- .../build_and_install/build_from_source_en.md | 13 +++++---- paddle/scripts/docker/build.sh | 26 ++--------------- paddle/scripts/submit_local.sh.in | 29 ++++--------------- python/setup.py.in | 12 ++++---- 4 files changed, 21 insertions(+), 59 deletions(-) diff --git a/doc/getstarted/build_and_install/build_from_source_en.md b/doc/getstarted/build_and_install/build_from_source_en.md index c0608ede8e..2f14614894 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.md +++ 
b/doc/getstarted/build_and_install/build_from_source_en.md @@ -68,7 +68,7 @@ As a simple example, consider the following: 1. **BLAS Dependencies(optional)** - CMake will search BLAS libraries from system. If not found, OpenBLAS will be downloaded, built and installed automatically. + CMake will search BLAS libraries from the system. If not found, OpenBLAS will be downloaded, built and installed automatically. To utilize preinstalled BLAS, you can simply specify MKL, OpenBLAS or ATLAS via `MKL_ROOT`, `OPENBLAS_ROOT` or `ATLAS_ROOT`. ```bash @@ -131,9 +131,9 @@ As a simple example, consider the following: To build GPU version, you will need the following installed: 1. a CUDA-capable GPU - 2. A supported version of Linux with a gcc compiler and toolchain + 2. A supported version of Linux with a GCC compiler and toolchain 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) - 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) + 4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn) The CUDA development environment relies on tight integration with the host development environment, including the host compiler and C runtime libraries, and is therefore only supported on @@ -172,6 +172,7 @@ export PATH=/bin:$PATH # install PaddlePaddle Python modules. sudo pip install /opt/paddle/share/wheels/*.whl ``` + ## Build on Centos 7 ### Install Dependencies @@ -192,9 +193,9 @@ sudo pip install /opt/paddle/share/wheels/*.whl To build GPU version, you will need the following installed: 1. a CUDA-capable GPU - 2. A supported version of Linux with a gcc compiler and toolchain + 2. A supported version of Linux with a GCC compiler and toolchain 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) - 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) + 4. NVIDIA cuDNN Library (available at https://developer.nvidia.com/cudnn) The CUDA development environment relies on tight integration with the host development environment, including the host compiler and C runtime libraries, and is therefore only supported on @@ -222,7 +223,7 @@ mkdir build && cd build ``` Finally, you can build and install PaddlePaddle: - + ```bash # you can add build option here, such as: cmake3 .. -DCMAKE_INSTALL_PREFIX= diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 6c2f5fed40..7c12664aed 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -120,25 +120,6 @@ EOF /woboq/indexgenerator/codebrowser_indexgenerator $WOBOQ_OUT fi -# generate deb package for current build -# FIXME(typhoonzero): should we remove paddle/scripts/deb ? -if [[ ${WITH_DEB:-ON} == "ON" ]]; then - cat <> /paddle/build/Dockerfile </dev/null) - BASEDIR=$(dirname "$0") - pip install ${BASEDIR}/../opt/paddle/share/wheels/*-${PYTHON_PADDLE_VERSION}-*.whl - if [ $? -ne 0 ]; then - echo "pip install wheels failed. " - echo "Please use 'sudo paddle' at the first time you use PaddlePaddle" - echo "PaddlePaddle will install some python dependencies automatically." - exit 1 - fi - echo "Python dependencies are installed." 
-fi case "$1" in "train") - ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_trainer ${@:2} + ${DEBUGGER} $PADDLE_BIN_PATH/paddle_trainer ${@:2} ;; "merge_model") - ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_merge_model ${@:2} + ${DEBUGGER} $PADDLE_BIN_PATH/paddle_merge_model ${@:2} ;; "pserver") - ${DEBUGGER} $MYDIR/../opt/paddle/bin/paddle_pserver_main ${@:2} + ${DEBUGGER} $PADDLE_BIN_PATH/paddle_pserver_main ${@:2} ;; "dump_config") python -m paddle.utils.dump_config ${@:2} @@ -129,7 +110,7 @@ case "$1" in python -m paddle.utils.make_model_diagram ${@:2} ;; "usage") - $MYDIR/../opt/paddle/bin/paddle_usage ${@:2} + $PADDLE_BIN_PATH/paddle_usage ${@:2} ;; "version") version diff --git a/python/setup.py.in b/python/setup.py.in index 287442e013..82f5006121 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -24,13 +24,14 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: setup_requires+=["opencv-python"] # the prefix is sys.prefix which should always be usr -paddle_bin_dir = 'local/opt/paddle/bin' +paddle_bin_dir = 'opt/paddle/bin' paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle_usage', '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer', '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model', - '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main'] + '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main', + '${PADDLE_BINARY_DIR}/paddle/scripts/paddle'] -paddle_rt_lib_dir = 'local/lib' +paddle_rt_lib_dir = 'lib' paddle_rt_libs = [] if '${MKL_SHARED_LIBS}'== '' else '${MKL_SHARED_LIBS}'.split(';') setup(name='paddlepaddle', @@ -50,8 +51,7 @@ setup(name='paddlepaddle', 'paddle.v2.framework.proto': '${PADDLE_BINARY_DIR}/paddle/framework', 'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle' }, - scripts=['${PADDLE_BINARY_DIR}/paddle/scripts/paddle'], + scripts=paddle_bins, distclass=BinaryDistribution, - data_files=[(paddle_bin_dir, paddle_bins), - (paddle_rt_lib_dir, paddle_rt_libs)] + data_files=[(paddle_rt_lib_dir, paddle_rt_libs)] ) From 57d96f88e1d59f4ed6173602a44b1380fed30a4e Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 16 Aug 2017 16:15:12 +0800 Subject: [PATCH 66/92] Fix document error. --- python/paddle/v2/trainer.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 9c4dd5f250..1daf23a738 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -27,16 +27,21 @@ class SGD(object): SGD Trainer combines data reader, network topolopy and update_equation together to train/test a neural network. - :param update_equation: The optimizer object. - :type update_equation: paddle.v2.optimizer.Optimizer :param cost: Target cost that neural network should be optimized. :type cost: paddle.v2.config_base.Layer :param parameters: The parameters dictionary. :type parameters: paddle.v2.parameters.Parameters + :param update_equation: The optimizer object. + :type update_equation: paddle.v2.optimizer.Optimizer :param extra_layers: Some layers in the neural network graph are not in the path of cost layer. - :param pserver_spec: pserver location, eg: localhost:3000 :type extra_layers: paddle.v2.config_base.Layer + :param is_local: Whether trainning locally + :type is_local: bool + :param pserver_spec: pserver location, eg: localhost:3000 + :type pserver_spec: string + :param use_etcd: Whether using etcd pserver. 
+ :param use_etcd: bool """ def __init__(self, From fd107ae550be7e93e45a88bc2826a9be803dd710 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 16 Aug 2017 17:00:57 +0800 Subject: [PATCH 67/92] Modify pserver_spec's doc. --- python/paddle/v2/trainer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 1daf23a738..4cf4d8b11d 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -38,7 +38,9 @@ class SGD(object): :type extra_layers: paddle.v2.config_base.Layer :param is_local: Whether trainning locally :type is_local: bool - :param pserver_spec: pserver location, eg: localhost:3000 + :param pserver_spec: pserver location, eg: localhost:3000, + if use_etcd is true, pserver_spec indicates + the etcd endpoints, eg: http://127.0.0.1:2379 :type pserver_spec: string :param use_etcd: Whether using etcd pserver. :param use_etcd: bool From 5d18aaf8223ef7de420e09ad1de8fd93dbdf6db7 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Wed, 16 Aug 2017 09:11:03 +0000 Subject: [PATCH 68/92] Add a c-api interface to get the output of a specified layer. --- paddle/capi/gradient_machine.cpp | 16 ++++++++++++++++ paddle/capi/gradient_machine.h | 18 +++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/paddle/capi/gradient_machine.cpp b/paddle/capi/gradient_machine.cpp index b3287552db..629449bbd4 100644 --- a/paddle/capi/gradient_machine.cpp +++ b/paddle/capi/gradient_machine.cpp @@ -146,3 +146,19 @@ paddle_error paddle_gradient_machine_randomize_param( m->machine->randParameters(); return kPD_NO_ERROR; } + +paddle_error paddle_gradient_machine_get_layer_output( + paddle_gradient_machine machine, + const char* layerName, + paddle_arguments args) { + auto m = cast(machine); + auto out = paddle::capi::cast(args); + if (m == nullptr || layerName == nullptr || out == nullptr || + m->machine == nullptr) { + return kPD_NULLPTR; + } + + auto layerOutput = m->machine->getLayerOutput(layerName); + out->args.push_back(layerOutput); + return kPD_NO_ERROR; +} diff --git a/paddle/capi/gradient_machine.h b/paddle/capi/gradient_machine.h index c613ade5b2..28eeb23e3b 100644 --- a/paddle/capi/gradient_machine.h +++ b/paddle/capi/gradient_machine.h @@ -39,7 +39,11 @@ PD_API paddle_error paddle_gradient_machine_create_for_inference( /** * @brief Create a gradient machine used for model inference, using config with * parameters which is generated by `paddle merge_model`. - * @param [out] machine that used for model inference. + * Example: + * paddle merge_model \ + * --model_dir="pass-00000" \ + * --model_file="merged_model.paddle" + * @param [out] machine that used for model inference * @param [in] mergedModel * @param [in] size * @return paddle_error @@ -97,6 +101,18 @@ paddle_gradient_machine_randomize_param(paddle_gradient_machine machine); PD_API paddle_error paddle_gradient_machine_destroy(paddle_gradient_machine machine); +/** + * @brief Get the output of the layer named `layerName`. 
+ * @param [in] gradient machine that have run a inference + * @param [in] layerName name of specified layer + * @param [out] args output of the specified layer + * @return paddle_error + */ +PD_API paddle_error +paddle_gradient_machine_get_layer_output(paddle_gradient_machine machine, + const char* layerName, + paddle_arguments args); + #ifdef __cplusplus } #endif From f7d32c614dc047faa3e19eb471d3bca6269d2d03 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 17:21:59 +0800 Subject: [PATCH 69/92] Fix bug of enforce when dladdr not found * Wrong Printf format before --- paddle/platform/enforce.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/platform/enforce.h b/paddle/platform/enforce.h index 15fdf7a94f..81448897e9 100644 --- a/paddle/platform/enforce.h +++ b/paddle/platform/enforce.h @@ -86,7 +86,7 @@ struct EnforceNotMet : public std::exception { 2 + sizeof(void*) * 2, call_stack[i], demangled, addr_offset); } else { - sout << string::Sprintf("%-3d %*0p %s\n", i, 2 + sizeof(void*) * 2, + sout << string::Sprintf("%-3d %*0p\n", i, 2 + sizeof(void*) * 2, call_stack[i]); } } From 8c653ba76a442a528c68240baf2d564971d5588d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 17:47:22 +0800 Subject: [PATCH 70/92] Complete remove std::shared_ptr --- paddle/framework/backward.cc | 40 +++++++++++++-------------- paddle/framework/op_registry.h | 11 ++++---- paddle/framework/op_registry_test.cc | 6 ++-- paddle/framework/pybind.cc | 37 +++++++++++-------------- paddle/operators/net_op.h | 41 +++++++++++++++++++++------- paddle/operators/net_op_test.cc | 23 +++++++--------- paddle/operators/recurrent_op.cc | 20 +++++++------- paddle/operators/recurrent_op.h | 24 +++++++++------- 8 files changed, 107 insertions(+), 95 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index c226e4e3d2..a1049f718d 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -15,6 +15,8 @@ #include "paddle/framework/backward.h" #include +#include + #include "paddle/framework/op_registry.h" #include "paddle/operators/net_op.h" #include "paddle/operators/recurrent_op.h" @@ -43,11 +45,11 @@ static bool AllInSet( return all_in_set; } -static std::shared_ptr NOP() { - auto net_op = std::make_shared(); +static std::unique_ptr NOP() { + auto net_op = new operators::NetOp(); net_op->SetType("@NOP@"); net_op->CompleteAddOp(); - return net_op; + return std::unique_ptr(net_op); } // Get backward operator from a forward operator, a recursive implementation. @@ -62,11 +64,7 @@ static std::shared_ptr NOP() { // operator, in a complex situation, it maybe a NetOp. // // See Backward.h for details -static std::shared_ptr BackwardRecursive( - const OperatorBase& forwardOp, - std::unordered_set& no_grad_names, size_t& uniq_id); - -std::shared_ptr BackwardRecursive( +static std::unique_ptr BackwardRecursive( const OperatorBase& forwardOp, std::unordered_set& no_grad_names, size_t& uniq_id) { // If all input gradients of forwarding operator do not need to calculate, @@ -91,7 +89,7 @@ std::shared_ptr BackwardRecursive( } // Returned gradient network - auto net = std::make_shared(); + auto net = std::unique_ptr(); if (forwardOp.IsNetOp()) { // Because forwardOp is a net op, it can static_cast. @@ -105,14 +103,14 @@ std::shared_ptr BackwardRecursive( // reversely travel forwardNet and collect all duplicate outputs. 
for (auto it = forwardNet.ops_.rbegin(); it != forwardNet.ops_.rend(); ++it, ++local_op_id) { - auto fwd = *it; + auto& fwd = *it; auto bwd = BackwardRecursive(*fwd, no_grad_names, uniq_id); - net->AddOp(bwd); ForEachVarName(bwd->Outputs(), [&dup_output_ops, local_op_id](const std::string& out) { dup_output_ops[out].emplace_back(local_op_id); return false; }); + net->AddOp(std::move(bwd)); } // Get unique ID for this method. auto uid = uniq_id++; @@ -122,7 +120,7 @@ std::shared_ptr BackwardRecursive( // to handle this case. For each duplicate output, rename it to an alias // (original name with a offset), append an `add` op for its operator, // and finally sum all the alias variable to the final output variable y. - using Pos = std::pair>; + using Pos = std::pair>; std::list insert_position; for (auto& dup_output_op : dup_output_ops) { const std::string& name = dup_output_op.first; @@ -150,13 +148,13 @@ std::shared_ptr BackwardRecursive( [](const Pos& l, const Pos& r) { return l.first > r.first; }); for (auto& pos : insert_position) { - net->InsertOp(pos.first + 1, pos.second); + net->InsertOp(pos.first + 1, std::move(pos.second)); } } else { - std::shared_ptr grad_op = OpRegistry::CreateGradOp(forwardOp); + std::unique_ptr grad_op(OpRegistry::CreateGradOp(forwardOp)); - ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, - grad_op](const std::string& grad_input) { + ForEachVarName(grad_op->Inputs(), [&no_grad_names, &net, &grad_op]( + const std::string& grad_input) { if (no_grad_names.count(grad_input)) { // +1 for \0 std::string prefix = grad_input.substr( @@ -190,20 +188,20 @@ std::shared_ptr BackwardRecursive( const auto& stepnet_op = *static_cast(&rnnop.stepnet()); // create stepnet's gradient op - auto grad_stepnet = BackwardRecursive(stepnet_op, no_grad_names, uniq_id); rnn_grad_op->set_stepnet( - std::static_pointer_cast(grad_stepnet)); + BackwardRecursive(stepnet_op, no_grad_names, uniq_id)); } if (net->ops_.empty()) { // Current no aux op is added to network return grad_op; } - net->AddOp(grad_op); + net->AddOp(std::move(grad_op)); } net->SetType("@GENERATED_BACKWARD@"); net->CompleteAddOp(); - return net; -} // namespace framework + return std::unique_ptr( + static_cast(net.release())); +} // See header for comments std::shared_ptr Backward( diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4fa0a2750b..f0cc0012e1 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -174,7 +174,7 @@ class OpRegistry { } } - static std::shared_ptr CreateOp(const std::string& type, + static std::unique_ptr CreateOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, AttributeMap attrs) { @@ -183,7 +183,7 @@ class OpRegistry { "Operator '%s' has not been registered.", type); it->second.checker_->Check(attrs); auto op = it->second.creator_(type, inputs, outputs, attrs); - return std::shared_ptr(op); + return std::unique_ptr(op); } static VarNameMap ConvertOpDescVarsToVarNameMap( @@ -199,7 +199,7 @@ class OpRegistry { return ret_val; } - static std::shared_ptr CreateOp(const OpDesc& op_desc) { + static std::unique_ptr CreateOp(const OpDesc& op_desc) { VarNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); VarNameMap outputs = ConvertOpDescVarsToVarNameMap(op_desc.outputs()); AttributeMap attrs; @@ -210,11 +210,10 @@ class OpRegistry { return CreateOp(op_desc.type(), inputs, outputs, attrs); } - static std::shared_ptr CreateGradOp(const OperatorBase& op) { + static std::unique_ptr 
CreateGradOp(const OperatorBase& op) { PADDLE_ENFORCE(!op.IsNetOp(), "Use framework::Backward to get backward ops"); - std::shared_ptr grad_op(BuildGradOp(&op)); - return grad_op; + return std::unique_ptr(BuildGradOp(&op)); } static std::unordered_map& op_info_map() { diff --git a/paddle/framework/op_registry_test.cc b/paddle/framework/op_registry_test.cc index 1a85d56835..50c45919c5 100644 --- a/paddle/framework/op_registry_test.cc +++ b/paddle/framework/op_registry_test.cc @@ -76,8 +76,7 @@ TEST(OpRegistry, CreateOp) { attr->set_type(paddle::framework::AttrType::FLOAT); attr->set_f(scale); - std::shared_ptr op = - paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); @@ -118,8 +117,7 @@ TEST(OpRegistry, DefaultValue) { ASSERT_TRUE(op_desc.IsInitialized()); - std::shared_ptr op = - paddle::framework::OpRegistry::CreateOp(op_desc); + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); paddle::framework::Scope scope; paddle::platform::CPUDeviceContext dev_ctx; op->Run(scope, dev_ctx); diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index fe0c87bc57..2fc1e214b2 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -207,8 +207,7 @@ All parameter, weight, gradient are variables in Paddle. .def(py::init<>()) .def("__str__", string::to_string); - py::class_> operator_base( - m, "Operator"); + py::class_ operator_base(m, "Operator"); operator_base.def_static("create", [](py::bytes protobin) { OpDesc desc; @@ -228,25 +227,23 @@ All parameter, weight, gradient are variables in Paddle. ExposeOperator(operator_base); - py::class_> net(m, "Net"); + py::class_ net(m, "Net"); net.def_static("create", - []() -> std::shared_ptr { - auto retv = std::make_shared(); + []() -> operators::NetOp * { + auto *retv = new operators::NetOp; retv->SetType("plain_net"); return retv; }) - .def("add_op", &operators::NetOp::AddOp) + .def("add_op", [](operators::NetOp &self, + const OperatorBase &op) { self.AddOp(op); }) .def("add_op", - [](operators::NetOp &self, - const std::shared_ptr &net) -> void { - self.AddOp(std::static_pointer_cast(net)); + [](operators::NetOp &self, const operators::NetOp &net) -> void { + self.AddOp(net); }) .def("add_op", [](operators::NetOp &self, - const std::shared_ptr &rnn) -> void { - self.AddOp(std::static_pointer_cast(rnn)); - }) + const operators::RecurrentOp &rnn) -> void { self.AddOp(rnn); }) .def("complete_add_op", &operators::NetOp::CompleteAddOp) .def("complete_add_op", [](std::shared_ptr &self) { self->CompleteAddOp(); @@ -255,12 +252,11 @@ All parameter, weight, gradient are variables in Paddle. ExposeOperator(net); // recurrent_op - py::class_> - rnn(m, "RecurrentOp"); + py::class_ rnn(m, "RecurrentOp"); rnn.def_static( "create", - [](py::bytes protobin) -> std::shared_ptr { + [](py::bytes protobin) -> operators::RecurrentOp * { OpDesc desc; PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), "Cannot parse user input to OpDesc"); @@ -268,13 +264,12 @@ All parameter, weight, gradient are variables in Paddle. 
"User OpDesc is not initialized, reason %s", desc.InitializationErrorString()); auto rnn_op = OpRegistry::CreateOp(desc); - return std::dynamic_pointer_cast(rnn_op); + return static_cast(rnn_op.release()); }) - .def("set_stepnet", - [](operators::RecurrentOp &self, - const std::shared_ptr &net) -> void { - self.set_stepnet(net); - }); + .def("set_stepnet", [](operators::RecurrentOp &self, + const operators::NetOp &net) -> void { + self.set_stepnet(net.Clone()); + }); ExposeOperator(rnn); m.def("unique_integer", UniqueIntegerGenerator); diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 743f0e67db..2ec65c63f3 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -45,11 +45,11 @@ class NetOp : public framework::OperatorBase { : framework::OperatorBase( static_cast(o)) { this->ops_.reserve(o.ops_.size()); - std::transform(o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), - [](const std::shared_ptr& op) - -> std::shared_ptr { - return std::shared_ptr(op->Clone()); - }); + std::transform( + o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), + [](const std::unique_ptr& op) { + return std::unique_ptr(op->Clone()); + }); this->CompleteAddOp(); } @@ -86,21 +86,42 @@ class NetOp : public framework::OperatorBase { return true; } + void AddOp(const framework::OperatorBase& op) { AddOp(op.Clone()); } + /** * @brief Add an operator by ptr */ - void AddOp(const std::shared_ptr& op) { + void AddOp(framework::OperatorBase* op, bool own) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - ops_.push_back(op); + if (!own) { + op = op->Clone().release(); + } + ops_.emplace_back(op); } - void InsertOp(size_t pos, const std::shared_ptr& op) { + void AddOp(std::unique_ptr&& op) { + AddOp(op.release(), true); + } + + void InsertOp(size_t pos, framework::OperatorBase* op, bool own) { PADDLE_ENFORCE(!add_op_done_, "Cannot InsertOp when this network is sealed"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); - ops_.insert(ops_.begin() + pos, op); + if (!own) { + op = op->Clone().release(); + } + ops_.insert(ops_.begin() + pos, + std::unique_ptr(op)); + } + + void InsertOp(size_t pos, std::unique_ptr&& op) { + InsertOp(pos, op.release(), true); + } + + void InsertOp(size_t pos, const framework::OperatorBase& op) { + InsertOp(pos, op.Clone()); } void CompleteAddOp(bool calculate = true); @@ -112,7 +133,7 @@ class NetOp : public framework::OperatorBase { std::unique_ptr Clone() const override; - std::vector> ops_; + std::vector> ops_; private: bool add_op_done_{false}; diff --git a/paddle/operators/net_op_test.cc b/paddle/operators/net_op_test.cc index e28d4df6a5..e9598610c0 100644 --- a/paddle/operators/net_op_test.cc +++ b/paddle/operators/net_op_test.cc @@ -38,15 +38,12 @@ TEST(OpKernel, all) { auto net = std::make_shared(); ASSERT_NE(net, nullptr); - auto op1 = std::shared_ptr( + net->AddOp(std::unique_ptr( new TestOp("test", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, - {{"Out", {"y"}}}, {})); - net->AddOp(op1); - - auto op2 = std::shared_ptr( + {{"Out", {"y"}}}, {}))); + net->AddOp(std::unique_ptr( new TestOp("test", {{"X", {"y"}}, {"W", {"w2"}}, {"b", {"b2"}}}, - {{"Out", {"z"}}}, {})); - net->AddOp(op2); + {{"Out", {"z"}}}, {}))); net->CompleteAddOp(); AssertSameVectorWithoutOrder({"x", "w1", "b1", "w2", "b2"}, @@ -61,21 +58,21 @@ TEST(OpKernel, all) { TEST(NetOp, insert_op) { NetOp net; - auto op1 = 
std::shared_ptr( + auto op1 = std::unique_ptr( new framework::NOP("empty", {{"X", {"x"}}, {"W", {"w1"}}, {"b", {"b1"}}}, {{"Out", {"y"}}}, {})); - net.AddOp(op1); - net.InsertOp(0, op1); + net.AddOp(*op1); + net.InsertOp(0, *op1); ASSERT_EQ(2UL, net.ops_.size()); - net.InsertOp(2, op1); + net.InsertOp(2, std::move(op1)); ASSERT_EQ(3UL, net.ops_.size()); } TEST(NetOp, Clone) { NetOp net; net.AddOp( - std::shared_ptr(new framework::NOP{"empty", {}, {}, {}})); - net.AddOp(std::shared_ptr( + std::unique_ptr(new framework::NOP{"empty", {}, {}, {}})); + net.AddOp(std::unique_ptr( new framework::NOP{"empty2", {}, {}, {}})); net.CompleteAddOp(true); auto new_net_op = net.Clone(); diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index 78ce0ba3c0..aae78a1cec 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -42,7 +42,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const { rnn::LinkMemories(step_scopes, arg_->memories, i, -1, true /*infer_shape_mode*/); } - (*stepnet_)->InferShape(*step_scopes[i]); + stepnet_->InferShape(*step_scopes[i]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -61,7 +61,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1, false /*infer_shape_mode*/); } - (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); + stepnet_->Run(*step_scopes[step_id], dev_ctx); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, false /*infer_shape_mode*/); @@ -76,15 +76,15 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { // Now all variables in scope must be created outside of op. PADDLE_ENFORCE_NOT_NULL(stepnet_); - PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); - PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs"); + PADDLE_ENFORCE(!stepnet_->Outputs().empty(), "stepnet_ op has no outputs"); + PADDLE_ENFORCE(!stepnet_->Outputs().empty(), "net_op has no outputs"); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { auto& step_scope = scope.NewScope(); // create step net's temp inputs - for (auto& input : (*stepnet_)->Inputs()) { + for (auto& input : stepnet_->Inputs()) { // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { @@ -93,7 +93,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { } } // create stepnet's outputs - for (const auto& output : (*stepnet_)->Outputs()) { + for (const auto& output : stepnet_->Outputs()) { for (auto& var_name : output.second) { step_scope.NewVar(var_name); } @@ -136,7 +136,7 @@ RecurrentOp::RecurrentOp(const std::string& type, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { rnn::InitArgument(kArgName, &arg_, *this); - alg_.Init(&arg_, &stepnet_); + alg_.Init(&arg_, stepnet_.get()); } class RecurrentAlgorithmProtoAndCheckerMaker @@ -178,7 +178,7 @@ void RecurrentGradientAlgorithm::Run( rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, false /*infer_shape_mode*/); } - (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); + stepnet_->Run(*step_scopes[step_id], dev_ctx); } LinkBootMemoryGradients(step_scopes[0], false); rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, @@ -215,7 +215,7 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, true 
/*infer_shape_mode*/); } - (*stepnet_)->InferShape(*step_scopes[step_id]); + stepnet_->InferShape(*step_scopes[step_id]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -228,7 +228,7 @@ RecurrentGradientOp::RecurrentGradientOp( const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { rnn::InitArgument(kArgName, &arg_, *this); - alg_.Init(&arg_, &stepnet_); + alg_.Init(&arg_, stepnet_.get()); } } // namespace operators diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 1d8a697395..4d091aa212 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -34,7 +34,7 @@ class RecurrentAlgorithm { void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const; - void Init(rnn::Argument* arg, std::shared_ptr* stepnet) { + void Init(rnn::Argument* arg, framework::OperatorBase* stepnet) { PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); arg_ = arg; stepnet_ = stepnet; @@ -63,7 +63,7 @@ class RecurrentAlgorithm { void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const; private: - std::shared_ptr* stepnet_; + framework::OperatorBase* stepnet_; rnn::Argument* arg_; mutable size_t seq_len_; }; @@ -80,7 +80,7 @@ class RecurrentGradientAlgorithm { * operator. */ public: - void Init(rnn::Argument* arg, std::shared_ptr* stepnet) { + void Init(rnn::Argument* arg, framework::OperatorBase* stepnet) { PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); arg_ = std::move(arg); stepnet_ = stepnet; @@ -107,7 +107,7 @@ class RecurrentGradientAlgorithm { private: rnn::Argument* arg_; mutable size_t seq_len_; - std::shared_ptr* stepnet_; + framework::OperatorBase* stepnet_; }; class RecurrentOp : public framework::OperatorBase { @@ -133,15 +133,17 @@ class RecurrentOp : public framework::OperatorBase { alg_.Run(scope, dev_ctx); } - void set_stepnet(std::shared_ptr net) { stepnet_ = net; } - const NetOp& stepnet() const { return *stepnet_; } + void set_stepnet(std::unique_ptr net) { + stepnet_ = std::move(net); + } + const OperatorBase& stepnet() const { return *stepnet_; } static const rnn::ArgumentName kArgName; private: RecurrentAlgorithm alg_; rnn::Argument arg_; - std::shared_ptr stepnet_; + std::unique_ptr stepnet_; }; class RecurrentGradientOp : public framework::OperatorBase { @@ -171,12 +173,14 @@ class RecurrentGradientOp : public framework::OperatorBase { static const rnn::ArgumentName kArgName; - void set_stepnet(const std::shared_ptr& net) { stepnet_ = net; } - const NetOp& stepnet() const { return *stepnet_; } + void set_stepnet(std::unique_ptr net) { + stepnet_ = std::move(net); + } + const OperatorBase& stepnet() const { return *stepnet_; } private: RecurrentGradientAlgorithm alg_; - std::shared_ptr stepnet_; + std::unique_ptr stepnet_; rnn::Argument arg_; }; From 8f80f5bc794d8900f9d57b51eea167f4dde2903c Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 16 Aug 2017 19:46:12 +0800 Subject: [PATCH 71/92] FIX: Release CPU/GPU memory via deleter --- paddle/memory/memory.cc | 59 ++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 207025f9b1..5946c3ea4a 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -16,19 +16,31 @@ limitations under the License. 
*/ #include "paddle/memory/detail/buddy_allocator.h" #include "paddle/memory/detail/system_allocator.h" -#include // for memcpy +#include // for transfrom +#include // for memcpy +#include // for call_once + +#include "glog/logging.h" namespace paddle { namespace memory { -detail::BuddyAllocator* GetCPUBuddyAllocator() { - static detail::BuddyAllocator* a = nullptr; - if (a == nullptr) { - a = new detail::BuddyAllocator(new detail::CPUAllocator, - platform::CpuMinChunkSize(), - platform::CpuMaxChunkSize()); - } - return a; +using BuddyAllocator = detail::BuddyAllocator; + +std::once_flag cpu_alloctor_flag; +std::once_flag gpu_alloctor_flag; + +BuddyAllocator* GetCPUBuddyAllocator() { + static std::unique_ptr a{ + nullptr, [](BuddyAllocator* p) { delete p; }}; + + std::call_once(cpu_alloctor_flag, [&]() { + a.reset(new BuddyAllocator(new detail::CPUAllocator, + platform::CpuMinChunkSize(), + platform::CpuMaxChunkSize())); + }); + + return a.get(); } template <> @@ -48,20 +60,31 @@ size_t Used(platform::CPUPlace place) { #ifndef PADDLE_ONLY_CPU -detail::BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { - static detail::BuddyAllocator** as = NULL; - if (as == NULL) { +BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { + using BuddyAllocVec = std::vector; + static std::unique_ptr as{ + new std::vector, [](BuddyAllocVec* p) { + std::for_each(p->begin(), p->end(), + [](BuddyAllocator* p) { delete p; }); + }}; + + // GPU buddy alloctors + auto& alloctors = *as.get(); + + // GPU buddy allocator initialization + std::call_once(gpu_alloctor_flag, [&]() { int gpu_num = platform::GetDeviceCount(); - as = new detail::BuddyAllocator*[gpu_num]; + alloctors.reserve(gpu_num); for (int gpu = 0; gpu < gpu_num; gpu++) { platform::SetDeviceId(gpu); - as[gpu] = new detail::BuddyAllocator(new detail::GPUAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize()); + alloctors.emplace_back(new BuddyAllocator(new detail::GPUAllocator, + platform::GpuMinChunkSize(), + platform::GpuMaxChunkSize())); } - } + }); + platform::SetDeviceId(gpu_id); - return as[gpu_id]; + return alloctors[gpu_id]; } template <> From 4be8189a8033010cf3517d14bed30d991780285b Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 16 Aug 2017 19:50:40 +0800 Subject: [PATCH 72/92] Modify pserver_spec's doc. --- python/paddle/v2/trainer.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 4cf4d8b11d..0654a30104 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -38,9 +38,10 @@ class SGD(object): :type extra_layers: paddle.v2.config_base.Layer :param is_local: Whether trainning locally :type is_local: bool - :param pserver_spec: pserver location, eg: localhost:3000, - if use_etcd is true, pserver_spec indicates - the etcd endpoints, eg: http://127.0.0.1:2379 + :param pserver_spec: comma string for pserver location, + eg:127.10.0.10:3000,127.10.0.11:3000, + and this parameter is only used for fault + tolerant mode cluster training. :type pserver_spec: string :param use_etcd: Whether using etcd pserver. :param use_etcd: bool From f15e083098d94af00c02f44e32f0b8891c079f55 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 16 Aug 2017 21:24:12 +0800 Subject: [PATCH 73/92] Remove std::shared_ptr in Python & C++ * Also simplify pybind implementation by using OperatorBase as holder type. 
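A minimal illustrative sketch, not part of the patch itself, of what dropping the std::shared_ptr holder means on the pybind11 side: when a class is bound without an explicit holder argument, pybind11 falls back to std::unique_ptr, and a factory that returns a raw pointer hands ownership to the Python object. The module name demo, the Op struct and the create lambda are hypothetical stand-ins (assuming only a stock pybind11 installation); the real bindings in the diff below apply the same idea to OperatorBase, NetOp and RecurrentOp.

// Illustrative sketch only -- "demo" and "Op" are hypothetical stand-ins.
#include <pybind11/pybind11.h>

#include <string>
#include <utility>

namespace py = pybind11;

struct Op {
  std::string type;
  explicit Op(std::string t) : type(std::move(t)) {}
};

PYBIND11_MODULE(demo, m) {
  // No holder argument: pybind11 defaults to std::unique_ptr<Op>, so each
  // instance handed to Python is uniquely owned there -- no ref counting.
  py::class_<Op>(m, "Op")
      .def_static("create",
                  [](const std::string &t) -> Op * {
                    // Returning a raw pointer from a factory transfers
                    // ownership to Python (take_ownership policy).
                    return new Op(t);
                  })
      .def("type", [](const Op &op) { return op.type; });
}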
--- paddle/framework/backward.cc | 4 +- paddle/framework/backward.h | 2 +- paddle/framework/backward_test.cc | 3 +- paddle/framework/pybind.cc | 124 +++++++----------- paddle/operators/net_op.h | 4 +- paddle/operators/recurrent_op.cc | 20 +-- paddle/operators/recurrent_op.h | 10 +- .../v2/framework/tests/gradient_checker.py | 1 - 8 files changed, 71 insertions(+), 97 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index a1049f718d..9d30887224 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -89,7 +89,7 @@ static std::unique_ptr BackwardRecursive( } // Returned gradient network - auto net = std::unique_ptr(); + auto net = std::unique_ptr(new operators::NetOp()); if (forwardOp.IsNetOp()) { // Because forwardOp is a net op, it can static_cast. @@ -204,7 +204,7 @@ static std::unique_ptr BackwardRecursive( } // See header for comments -std::shared_ptr Backward( +std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars) { std::unordered_set no_grad_names; diff --git a/paddle/framework/backward.h b/paddle/framework/backward.h index c181919dc1..1ecf69881b 100644 --- a/paddle/framework/backward.h +++ b/paddle/framework/backward.h @@ -20,7 +20,7 @@ namespace framework { // Create the backward operator from a forward operator. // TODO(yuyang18): Add more API reference comment. -extern std::shared_ptr Backward( +extern std::unique_ptr Backward( const OperatorBase& forwardOp, const std::unordered_set& no_grad_vars); } // namespace framework diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index d942604bf0..1003b1ccd8 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -180,8 +180,7 @@ TEST(Backward, simple_op_not_need_grad) { auto no_input_gop = f::Backward(*fwd, {"x", "b"}); ASSERT_NE(no_input_gop, nullptr); ASSERT_TRUE(no_input_gop->IsNetOp()); - ASSERT_EQ(0UL, - std::static_pointer_cast(no_input_gop)->ops_.size()); + ASSERT_EQ(0UL, static_cast(no_input_gop.get())->ops_.size()); } TEST(Backward, net_fc_backward_normal) { diff --git a/paddle/framework/pybind.cc b/paddle/framework/pybind.cc index 2fc1e214b2..f0114b9e49 100644 --- a/paddle/framework/pybind.cc +++ b/paddle/framework/pybind.cc @@ -48,29 +48,6 @@ namespace framework { using Tensor = framework::Tensor; -template -void ExposeOperator(ClassType &m) { - m.def("infer_shape", &ClassType::type::InferShape) - .def("run", &ClassType::type::Run) - .def("type", - [](const typename ClassType::type &op) -> std::string { - return op.Type(); - }) - .def("outputs", - [](const typename ClassType::type &op) - -> std::map> { - return op.Outputs(); - }) - .def("inputs", - [](const typename ClassType::type &op) { return op.Inputs(); }) - .def("__str__", &ClassType::type::DebugString) - .def("no_intermediate_outputs", - [](const typename ClassType::type &op) { - return op.OutputVars(false); - }) - .def("support_gpu", &ClassType::type::SupportGPU); -} - static size_t UniqueIntegerGenerator() { static std::atomic generator; return generator.fetch_add(1); @@ -207,70 +184,69 @@ All parameter, weight, gradient are variables in Paddle. 
.def(py::init<>()) .def("__str__", string::to_string); - py::class_ operator_base(m, "Operator"); - - operator_base.def_static("create", [](py::bytes protobin) { - OpDesc desc; - PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), - "Cannot parse user input to OpDesc"); - PADDLE_ENFORCE(desc.IsInitialized(), - "User OpDesc is not initialized, reason %s", - desc.InitializationErrorString()); - return OpRegistry::CreateOp(desc); - }); - - operator_base.def("backward", - [](const OperatorBase &forwardOp, - const std::unordered_set &no_grad_vars) { - return Backward(forwardOp, no_grad_vars); - }); - - ExposeOperator(operator_base); - - py::class_ net(m, "Net"); + py::class_(m, "Operator") + .def_static("create", + [](py::bytes protobin) { + OpDesc desc; + PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), + "Cannot parse user input to OpDesc"); + PADDLE_ENFORCE(desc.IsInitialized(), + "User OpDesc is not initialized, reason %s", + desc.InitializationErrorString()); + return OpRegistry::CreateOp(desc); + }) + .def("backward", + [](const OperatorBase &forwardOp, + const std::unordered_set &no_grad_vars) { + return Backward(forwardOp, no_grad_vars).release(); + }) + .def("infer_shape", &OperatorBase::InferShape) + .def("run", &OperatorBase::Run) + .def("type", + [](const OperatorBase &op) -> std::string { return op.Type(); }) + .def("outputs", + [](const OperatorBase &op) + -> std::map> { + return op.Outputs(); + }) + .def("inputs", [](const OperatorBase &op) { return op.Inputs(); }) + .def("__str__", &OperatorBase::DebugString) + .def("no_intermediate_outputs", + [](const OperatorBase &op) { return op.OutputVars(false); }) + .def("support_gpu", &OperatorBase::SupportGPU); - net.def_static("create", - []() -> operators::NetOp * { - auto *retv = new operators::NetOp; - retv->SetType("plain_net"); - return retv; - }) + py::class_(m, "Net") + .def_static("create", + []() -> operators::NetOp * { + auto *retv = new operators::NetOp; + retv->SetType("plain_net"); + return retv; + }) .def("add_op", [](operators::NetOp &self, const OperatorBase &op) { self.AddOp(op); }) - .def("add_op", - [](operators::NetOp &self, const operators::NetOp &net) -> void { - self.AddOp(net); - }) - .def("add_op", - [](operators::NetOp &self, - const operators::RecurrentOp &rnn) -> void { self.AddOp(rnn); }) .def("complete_add_op", &operators::NetOp::CompleteAddOp) .def("complete_add_op", [](std::shared_ptr &self) { self->CompleteAddOp(); }); - ExposeOperator(net); - // recurrent_op - py::class_ rnn(m, "RecurrentOp"); - - rnn.def_static( - "create", - [](py::bytes protobin) -> operators::RecurrentOp * { - OpDesc desc; - PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), - "Cannot parse user input to OpDesc"); - PADDLE_ENFORCE(desc.IsInitialized(), - "User OpDesc is not initialized, reason %s", - desc.InitializationErrorString()); - auto rnn_op = OpRegistry::CreateOp(desc); - return static_cast(rnn_op.release()); - }) + py::class_(m, "RecurrentOp") + .def_static( + "create", + [](py::bytes protobin) -> operators::RecurrentOp * { + OpDesc desc; + PADDLE_ENFORCE(desc.ParsePartialFromString(protobin), + "Cannot parse user input to OpDesc"); + PADDLE_ENFORCE(desc.IsInitialized(), + "User OpDesc is not initialized, reason %s", + desc.InitializationErrorString()); + auto rnn_op = OpRegistry::CreateOp(desc); + return static_cast(rnn_op.release()); + }) .def("set_stepnet", [](operators::RecurrentOp &self, const operators::NetOp &net) -> void { self.set_stepnet(net.Clone()); }); - ExposeOperator(rnn); m.def("unique_integer", 
UniqueIntegerGenerator); diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index 2ec65c63f3..ce7da1f383 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -41,9 +41,7 @@ class NetOp : public framework::OperatorBase { NetOp(const std::string& type, const VarNameMap& inputs, const VarNameMap& outputs, const framework::AttributeMap& attrs); - NetOp(const NetOp& o) - : framework::OperatorBase( - static_cast(o)) { + NetOp(const NetOp& o) : framework::OperatorBase(o.type_, {}, {}, o.attrs_) { this->ops_.reserve(o.ops_.size()); std::transform( o.ops_.begin(), o.ops_.end(), std::back_inserter(this->ops_), diff --git a/paddle/operators/recurrent_op.cc b/paddle/operators/recurrent_op.cc index aae78a1cec..78ce0ba3c0 100644 --- a/paddle/operators/recurrent_op.cc +++ b/paddle/operators/recurrent_op.cc @@ -42,7 +42,7 @@ void RecurrentAlgorithm::InferShape(const Scope& scope) const { rnn::LinkMemories(step_scopes, arg_->memories, i, -1, true /*infer_shape_mode*/); } - stepnet_->InferShape(*step_scopes[i]); + (*stepnet_)->InferShape(*step_scopes[i]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -61,7 +61,7 @@ void RecurrentAlgorithm::Run(const Scope& scope, rnn::LinkMemories(step_scopes, arg_->memories, step_id, -1, false /*infer_shape_mode*/); } - stepnet_->Run(*step_scopes[step_id], dev_ctx); + (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, false /*infer_shape_mode*/); @@ -76,15 +76,15 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { // Now all variables in scope must be created outside of op. PADDLE_ENFORCE_NOT_NULL(stepnet_); - PADDLE_ENFORCE(!stepnet_->Outputs().empty(), "stepnet_ op has no outputs"); - PADDLE_ENFORCE(!stepnet_->Outputs().empty(), "net_op has no outputs"); + PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "stepnet_ op has no outputs"); + PADDLE_ENFORCE(!(*stepnet_)->Outputs().empty(), "net_op has no outputs"); if (seq_len_ > step_scopes->size()) { for (size_t i = step_scopes->size(); i < seq_len_; ++i) { auto& step_scope = scope.NewScope(); // create step net's temp inputs - for (auto& input : stepnet_->Inputs()) { + for (auto& input : (*stepnet_)->Inputs()) { // the weight are located in parent scope for (auto& var_name : input.second) { if (!step_scope.FindVar(var_name)) { @@ -93,7 +93,7 @@ void RecurrentAlgorithm::CreateScopes(const Scope& scope) const { } } // create stepnet's outputs - for (const auto& output : stepnet_->Outputs()) { + for (const auto& output : (*stepnet_)->Outputs()) { for (auto& var_name : output.second) { step_scope.NewVar(var_name); } @@ -136,7 +136,7 @@ RecurrentOp::RecurrentOp(const std::string& type, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { rnn::InitArgument(kArgName, &arg_, *this); - alg_.Init(&arg_, stepnet_.get()); + alg_.Init(&arg_, &stepnet_); } class RecurrentAlgorithmProtoAndCheckerMaker @@ -178,7 +178,7 @@ void RecurrentGradientAlgorithm::Run( rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, false /*infer_shape_mode*/); } - stepnet_->Run(*step_scopes[step_id], dev_ctx); + (*stepnet_)->Run(*step_scopes[step_id], dev_ctx); } LinkBootMemoryGradients(step_scopes[0], false); rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, @@ -215,7 +215,7 @@ void RecurrentGradientAlgorithm::InferShape(const Scope& scope) const { rnn::LinkMemories(step_scopes, arg_->memories, step_id, 1, true /*infer_shape_mode*/); } - 
stepnet_->InferShape(*step_scopes[step_id]); + (*stepnet_)->InferShape(*step_scopes[step_id]); } rnn::ConcatOutputs(step_scopes, arg_->outlinks, seq_len_, true /*infer_shape_mode*/); @@ -228,7 +228,7 @@ RecurrentGradientOp::RecurrentGradientOp( const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) { rnn::InitArgument(kArgName, &arg_, *this); - alg_.Init(&arg_, stepnet_.get()); + alg_.Init(&arg_, &stepnet_); } } // namespace operators diff --git a/paddle/operators/recurrent_op.h b/paddle/operators/recurrent_op.h index 4d091aa212..bcfa817de8 100644 --- a/paddle/operators/recurrent_op.h +++ b/paddle/operators/recurrent_op.h @@ -34,7 +34,8 @@ class RecurrentAlgorithm { void Run(const framework::Scope& scope, const platform::DeviceContext& dev_ctx) const; - void Init(rnn::Argument* arg, framework::OperatorBase* stepnet) { + void Init(rnn::Argument* arg, + std::unique_ptr* stepnet) { PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); arg_ = arg; stepnet_ = stepnet; @@ -63,7 +64,7 @@ class RecurrentAlgorithm { void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const; private: - framework::OperatorBase* stepnet_; + std::unique_ptr* stepnet_; rnn::Argument* arg_; mutable size_t seq_len_; }; @@ -80,7 +81,8 @@ class RecurrentGradientAlgorithm { * operator. */ public: - void Init(rnn::Argument* arg, framework::OperatorBase* stepnet) { + void Init(rnn::Argument* arg, + std::unique_ptr* stepnet) { PADDLE_ENFORCE_NOT_NULL(stepnet, "stepnet should be set before."); arg_ = std::move(arg); stepnet_ = stepnet; @@ -107,7 +109,7 @@ class RecurrentGradientAlgorithm { private: rnn::Argument* arg_; mutable size_t seq_len_; - framework::OperatorBase* stepnet_; + std::unique_ptr* stepnet_; }; class RecurrentOp : public framework::OperatorBase { diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 501cf6110f..831c0f0f2a 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -165,7 +165,6 @@ class GradientChecker(unittest.TestCase): for no_grad in no_grad_set: if no_grad not in in_names: raise ValueError("no_grad should be in in_names") - backward_op = core.Operator.backward(forward_op, no_grad_set) bwd_outputs = backward_op.outputs() From 3484874278a1e1377af37677d29609f95fff2325 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 16 Aug 2017 14:44:51 -0700 Subject: [PATCH 74/92] Rename `AsNoGradient` of VariableBuilder to `NotInGradient` --- paddle/framework/backward_test.cc | 6 +++--- paddle/framework/framework.proto | 2 +- paddle/framework/grad_op_builder.cc | 2 +- paddle/framework/grad_op_builder_test.cc | 4 ++-- paddle/framework/operator.h | 7 ++----- paddle/operators/mean_op.cc | 2 +- 6 files changed, 10 insertions(+), 13 deletions(-) diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc index d942604bf0..8780b50773 100644 --- a/paddle/framework/backward_test.cc +++ b/paddle/framework/backward_test.cc @@ -32,9 +32,9 @@ class RowWiseAddOpMaker : public OpProtoAndCheckerMaker { public: RowWiseAddOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input X of Add").AsNoGradient(); - AddInput("b", "Bias of Add").AsNoGradient(); - AddOutput("Out", "Out of Add").AsNoGradient(); + AddInput("X", "Input X of Add").NotInGradient(); + AddInput("b", "Bias of Add").NotInGradient(); + AddOutput("Out", "Out of 
Add").NotInGradient(); AddComment("Add Op"); } }; diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 7077e8aa2c..ae44a1ffd4 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -60,7 +60,7 @@ message OpProto { optional bool duplicable = 3 [ default = false ]; optional bool intermediate = 4 [ default = false ]; - optional bool no_gradient = 5 [ default = false ]; + optional bool not_in_gradient = 5 [ default = false ]; } // AttrProto describes the C++ type Attribute. diff --git a/paddle/framework/grad_op_builder.cc b/paddle/framework/grad_op_builder.cc index b73dac22d0..0a2a41f6b6 100644 --- a/paddle/framework/grad_op_builder.cc +++ b/paddle/framework/grad_op_builder.cc @@ -28,7 +28,7 @@ static void TransOpArg(const OperatorBase* src_op, const OpArgType& src_type, const auto& src_arg_list = src_type == OpArgType::IN ? proto->inputs() : proto->outputs(); for (const auto& arg : src_arg_list) { - if (arg.no_gradient() && !is_grad) continue; + if (arg.not_in_gradient() && !is_grad) continue; const std::string src_name = arg.name(); std::string dst_name = is_grad ? GradVarName(src_name) : src_name; dst_inout[dst_name].reserve(src_inout.at(src_name).size()); diff --git a/paddle/framework/grad_op_builder_test.cc b/paddle/framework/grad_op_builder_test.cc index 0c26293fd2..902c2655e9 100644 --- a/paddle/framework/grad_op_builder_test.cc +++ b/paddle/framework/grad_op_builder_test.cc @@ -26,10 +26,10 @@ class IOIgnoredOpMaker : public OpProtoAndCheckerMaker { IOIgnoredOpMaker(OpProto *proto, OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("In1", "a single input"); - AddInput("In2_mult", "a multiple input").AsDuplicable().AsNoGradient(); + AddInput("In2_mult", "a multiple input").AsDuplicable().NotInGradient(); AddInput("In3_mult", "another multiple input").AsDuplicable(); AddOutput("Out1_mult", "a multiple output").AsDuplicable(); - AddOutput("Out2", "a single output").AsNoGradient(); + AddOutput("Out2", "a single output").NotInGradient(); AddComment("op with inputs and outputs ignored in gradient calculating"); } }; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 2c8620a7ce..dbe205976c 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -164,11 +164,8 @@ class OpProtoAndCheckerMaker { return *this; } - // TODO(FengJiayi, yuyang18): `AsNoGradient` is a very bad name, because it - // means that input/output is not needed when calculate gradient. It does - // not mean no gradient when backward. It should be changed soon. 
- VariableBuilder& AsNoGradient() { - var_->set_no_gradient(true); + VariableBuilder& NotInGradient() { + var_->set_not_in_gradient(true); return *this; } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 49d0f43508..d3d0e55a67 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -34,7 +34,7 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { MeanOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); - AddOutput("Out", "The output of mean op").AsNoGradient(); + AddOutput("Out", "The output of mean op").NotInGradient(); AddComment("Mean Operator"); } }; From ac02fb82d7a76fce4a870acc30891d657a83ab9c Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:14:05 +0800 Subject: [PATCH 75/92] FIX: tensor memory must be gaven back to buddy allocator for free --- paddle/operators/gather_test.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/operators/gather_test.cc b/paddle/operators/gather_test.cc index d24d83f299..0ae1e99452 100644 --- a/paddle/operators/gather_test.cc +++ b/paddle/operators/gather_test.cc @@ -45,4 +45,8 @@ TEST(Gather, GatherData) { for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); + + delete src; + delete index; + delete output; } From 2f7489fbd565caff0608214808ef682b6f46f984 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:14:48 +0800 Subject: [PATCH 76/92] change use_pinned_memory to true for cpu --- paddle/memory/detail/system_allocator.cc | 2 +- paddle/memory/memory.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc index f61e67a329..a270bd5958 100644 --- a/paddle/memory/detail/system_allocator.cc +++ b/paddle/memory/detail/system_allocator.cc @@ -27,7 +27,7 @@ limitations under the License. */ // between host and device. Allocates too much would reduce the amount // of memory available to the system for paging. So, by default, we // should set false to use_pinned_memory. 
-DEFINE_bool(use_pinned_memory, false, "If set, allocate cpu pinned memory."); +DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory."); namespace paddle { namespace memory { diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 5946c3ea4a..684635405a 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -63,7 +63,7 @@ size_t Used(platform::CPUPlace place) { BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { using BuddyAllocVec = std::vector; static std::unique_ptr as{ - new std::vector, [](BuddyAllocVec* p) { + new BuddyAllocVec, [](BuddyAllocVec* p) { std::for_each(p->begin(), p->end(), [](BuddyAllocator* p) { delete p; }); }}; From 33228cacddc5058feeb5444cf98dd25dc2ca77c0 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:21:28 +0800 Subject: [PATCH 77/92] Fix typo error --- paddle/memory/memory.cc | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 684635405a..99c62b50ee 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -27,14 +27,14 @@ namespace memory { using BuddyAllocator = detail::BuddyAllocator; -std::once_flag cpu_alloctor_flag; -std::once_flag gpu_alloctor_flag; +std::once_flag cpu_allocator_flag; +std::once_flag gpu_allocator_flag; BuddyAllocator* GetCPUBuddyAllocator() { static std::unique_ptr a{ nullptr, [](BuddyAllocator* p) { delete p; }}; - std::call_once(cpu_alloctor_flag, [&]() { + std::call_once(cpu_allocator_flag, [&]() { a.reset(new BuddyAllocator(new detail::CPUAllocator, platform::CpuMinChunkSize(), platform::CpuMaxChunkSize())); @@ -68,23 +68,23 @@ BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) { [](BuddyAllocator* p) { delete p; }); }}; - // GPU buddy alloctors - auto& alloctors = *as.get(); + // GPU buddy allocators + auto& allocators = *as.get(); // GPU buddy allocator initialization - std::call_once(gpu_alloctor_flag, [&]() { + std::call_once(gpu_allocator_flag, [&]() { int gpu_num = platform::GetDeviceCount(); - alloctors.reserve(gpu_num); + allocators.reserve(gpu_num); for (int gpu = 0; gpu < gpu_num; gpu++) { platform::SetDeviceId(gpu); - alloctors.emplace_back(new BuddyAllocator(new detail::GPUAllocator, - platform::GpuMinChunkSize(), - platform::GpuMaxChunkSize())); + allocators.emplace_back(new BuddyAllocator(new detail::GPUAllocator, + platform::GpuMinChunkSize(), + platform::GpuMaxChunkSize())); } }); platform::SetDeviceId(gpu_id); - return alloctors[gpu_id]; + return allocators[gpu_id]; } template <> From ff5bfc1c89b5b52588a128e90399bdac804a0b44 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:23:59 +0800 Subject: [PATCH 78/92] Google style for header file includes --- paddle/memory/memory.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index 99c62b50ee..be346325c2 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -13,14 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/memory/memory.h" -#include "paddle/memory/detail/buddy_allocator.h" -#include "paddle/memory/detail/system_allocator.h" #include // for transfrom #include // for memcpy #include // for call_once -#include "glog/logging.h" +#include "paddle/memory/detail/buddy_allocator.h" +#include "paddle/memory/detail/system_allocator.h" namespace paddle { namespace memory { From 3f9fe6248754bcfd85356174725c99a23e763c8a Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:24:20 +0800 Subject: [PATCH 79/92] Fix typo error --- paddle/memory/memory.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc index be346325c2..dfe9f16f74 100644 --- a/paddle/memory/memory.cc +++ b/paddle/memory/memory.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/memory/memory.h" -#include // for transfrom +#include // for transform #include // for memcpy #include // for call_once From d8560ec2e819c5a708caf5e35f791571ea3628aa Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 17 Aug 2017 10:29:50 +0800 Subject: [PATCH 80/92] Fix scatter_test --- paddle/operators/scatter_test.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/operators/scatter_test.cc b/paddle/operators/scatter_test.cc index 4449ce6564..26fdaff146 100644 --- a/paddle/operators/scatter_test.cc +++ b/paddle/operators/scatter_test.cc @@ -49,4 +49,8 @@ TEST(scatter, ScatterUpdate) { EXPECT_EQ(output->data()[i], float(i - 4)); for (size_t i = 8; i < 16; ++i) EXPECT_EQ(p_output[i], float(0)); for (size_t i = 8; i < 16; ++i) EXPECT_EQ(output->data()[i], float(0)); + + delete src; + delete index; + delete output; } From 0945dc1b9968f92a23bcedbb24bf68aacd194f26 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 17 Aug 2017 10:31:46 +0800 Subject: [PATCH 81/92] enable header format --- paddle/parameter/Parameter.cpp | 10 ++++++---- paddle/parameter/Parameter.h | 29 +++++++++++++++++++++++++++-- paddle/pserver/ParameterServer2.cpp | 7 ++++--- 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp index ebe36d4937..f031109501 100644 --- a/paddle/parameter/Parameter.cpp +++ b/paddle/parameter/Parameter.cpp @@ -48,7 +48,8 @@ Parameter::Parameter(const ParameterConfig& config, bool useGpu, bool doInit) deviceId_(-1), sharedCount_(0), updateCounter_(0), - updated_(false) { + updated_(false), + headerFormat_(PARAM_FORMAT_ORIGINAL) { setID(-1); /* capture uninitialized id */ if (useGpu_ && FLAGS_parallel_nn) { /* gpu environment is specified by device property */ @@ -285,7 +286,7 @@ bool Parameter::save(const std::string& filename) const { bool Parameter::save(std::ostream& s) const { CpuVector vec(*bufs_[PARAMETER_VALUE].get()); Header header; - header.version = kFormatVersion; + header.format = headerFormat_; header.valueSize = sizeof(real); header.size = getSize(); @@ -344,8 +345,9 @@ bool Parameter::load(std::istream& s) { Header header; CHECK(s.read(reinterpret_cast(&header), sizeof(header))) << "Fail to read parameter " << getName(); - CHECK_EQ(header.version, kFormatVersion) << "Incorrect format version: " - << header.version; + CHECK(isHeaderFormatSupported(header.format)) << "Incorrect format version: " + << header.format; + headerFormat_ = header.format; CHECK_EQ(header.size, getSize()) << "The size (" << header.size << ") in the file does not match the size " << "(" << getSize() << ") of the parameter: " << getName(); diff --git a/paddle/parameter/Parameter.h 
b/paddle/parameter/Parameter.h index 0bac76f068..cffd3aa92e 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -34,6 +34,12 @@ limitations under the License. */ namespace paddle { +typedef enum { + PARAM_FORMAT_ORIGINAL = 0, // the paddle original basic format + PARAM_FORMAT_MKLDNN_OI, // the mkldnn format oi + PARAM_FORMAT_ITEMS, // the total format items numbers +} PARAM_FORMAT; + class SparsePrefetchRowCpuMatrix; class Parameter; @@ -242,14 +248,30 @@ public: /// Initialize the value to 0 void zeroMem(); - static const int kFormatVersion = 0; /// file header structure struct Header { - int32_t version; // = 0, file format version + int32_t format; // = PARAM_FORMAT uint32_t valueSize; // = sizeof(real) uint64_t size; // = getSize() }; + /** + * @brief Is the header supported + */ + static bool isHeaderFormatSupported(int32_t fmt) { + return fmt < PARAM_FORMAT_ITEMS; + } + + /** + * @brief Get the format in header + */ + int getHeaderFormat() { return headerFormat_; } + + /** + * @brief Set the format in header + */ + void setHeaderFormat(int32_t fmt) { headerFormat_ = fmt; } + /** * @brief Parameter Update Hook. * @@ -321,6 +343,9 @@ protected: bool updated_; SparseFormat format_; + // The header format for saving or loading param + int32_t headerFormat_; + std::vector> updaterHooks_; public: diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp index d7c1d4f788..54f5c4c0fb 100644 --- a/paddle/pserver/ParameterServer2.cpp +++ b/paddle/pserver/ParameterServer2.cpp @@ -1032,8 +1032,8 @@ void ParameterServer2::loadValueVector(const LoadValueRequest& request, Parameter::Header header; CHECK(fs.read(reinterpret_cast(&header), sizeof(header))) << "Fail to read parameters in pserver"; - CHECK_EQ(header.version, Parameter::kFormatVersion) - << "Incorrect format version: " << header.version; + CHECK(Parameter::isHeaderFormatSupported(header.format)) + << "Incorrect format version: " << header.format; CHECK_EQ(header.size, (size_t)size_) << "The size (" << header.size << ") in the file does not match the size " << "(" << size_ << ") of the pserver: " << serverId_; @@ -1063,7 +1063,8 @@ void ParameterServer2::saveValueVector(const SaveValueRequest& request, CpuVector& vec = vectors_[PARAMETER_APPLY] ? *vectors_[PARAMETER_APPLY] : *vectors_[PARAMETER_VALUE]; Parameter::Header header; - header.version = Parameter::kFormatVersion; + // TODO(TJ): save param headerFormat_ + header.format = PARAM_FORMAT_ORIGINAL; header.valueSize = sizeof(real); header.size = size_; From 4b148d0afd9bdf255c0e69b406577e83ae156388 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 17 Aug 2017 10:59:10 +0800 Subject: [PATCH 82/92] Fix typo --- paddle/framework/operator.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 90e30bee0a..6448170652 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -119,7 +119,7 @@ class OperatorBase { protected: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: - // I (Inputs)opear + // I (Inputs) // O (Outputs) // OG (Output Gradients) VarNameMap inputs_; From 225579b9d9ab28de046805f40301d68d9dd3b5cb Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 17 Aug 2017 11:10:32 +0800 Subject: [PATCH 83/92] Remove own for add_op * add_op could take a unique_ptr or a const reference. If unique_ptr is taken, the NetOp will take care of that operator's life cycle. If a const reference is taken, that op will be Cloned. 
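A small self-contained sketch, not part of the patch, of the two call styles this leaves in place: passing a unique_ptr moves the operator into the net, while passing a const reference keeps the caller's object and stores a clone. OpBase, MyOp and the toy Net below are hypothetical stand-ins for OperatorBase, a concrete operator and NetOp, used only to make the ownership rules concrete.

// Illustrative sketch only -- OpBase, MyOp and Net are hypothetical stand-ins.
#include <memory>
#include <utility>
#include <vector>

struct OpBase {
  virtual ~OpBase() = default;
  virtual std::unique_ptr<OpBase> Clone() const = 0;
};

struct MyOp : OpBase {
  std::unique_ptr<OpBase> Clone() const override {
    return std::unique_ptr<OpBase>(new MyOp(*this));
  }
};

struct Net {
  std::vector<std::unique_ptr<OpBase>> ops_;
  // Caller hands over ownership; the net keeps the pointer as-is.
  void AddOp(std::unique_ptr<OpBase> op) { ops_.push_back(std::move(op)); }
  // Caller keeps its object; the net stores a clone.
  void AddOp(const OpBase &op) { AddOp(op.Clone()); }
};

int main() {
  Net net;
  std::unique_ptr<OpBase> op(new MyOp);
  net.AddOp(*op);            // cloned; `op` stays valid
  net.AddOp(std::move(op));  // ownership moves into the net
  return net.ops_.size() == 2 ? 0 : 1;
}

This mirrors how the reworked net_op tests earlier in the series call AddOp/InsertOp with either a dereferenced operator or a moved unique_ptr.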
--- paddle/operators/net_op.h | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h index ce7da1f383..e8720c9609 100644 --- a/paddle/operators/net_op.h +++ b/paddle/operators/net_op.h @@ -89,33 +89,18 @@ class NetOp : public framework::OperatorBase { /** * @brief Add an operator by ptr */ - void AddOp(framework::OperatorBase* op, bool own) { + void AddOp(std::unique_ptr&& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); - if (!own) { - op = op->Clone().release(); - } - ops_.emplace_back(op); - } - - void AddOp(std::unique_ptr&& op) { - AddOp(op.release(), true); + ops_.push_back(std::move(op)); } - void InsertOp(size_t pos, framework::OperatorBase* op, bool own) { + void InsertOp(size_t pos, std::unique_ptr&& op) { PADDLE_ENFORCE(!add_op_done_, "Cannot InsertOp when this network is sealed"); PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op"); PADDLE_ENFORCE_LE(pos, ops_.size(), "Out of range"); - if (!own) { - op = op->Clone().release(); - } - ops_.insert(ops_.begin() + pos, - std::unique_ptr(op)); - } - - void InsertOp(size_t pos, std::unique_ptr&& op) { - InsertOp(pos, op.release(), true); + ops_.insert(ops_.begin() + pos, std::move(op)); } void InsertOp(size_t pos, const framework::OperatorBase& op) { From a28a5564d26e9aeac48cb41f2f2bd40fcd73946a Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 17 Aug 2017 11:55:48 +0800 Subject: [PATCH 84/92] add more comments and fix code style. --- .../v2/framework/tests/gradient_checker.py | 64 +++++++++++++++---- 1 file changed, 51 insertions(+), 13 deletions(-) diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index d251f14b9d..2c92dfa43e 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -110,7 +110,24 @@ def get_numeric_gradient(op, class GradientChecker(unittest.TestCase): - def get_grad(self, forward_op, backward_op, input_vars, grad_names, place): + def __get_gradient(self, forward_op, backward_op, input_value, grad_names, + place): + """Get the input gradients after running forward and backward operators + on the given places. + + :param forward_op: forward operator + :type forward_op: Operator + :param backward_op: backward operator + :type backward_op: Operator + :param input_value: input values. + :type input_value: dict{string:numpy.array} + :param grad_names: the names of returned input gradients. + :type input_value: a list of string + :param place: the device type. + :type place: CPUPlace or GPUPlace + :return: the input grdients of given grad_names. + :rtype: a list of numpy.array + """ scope = core.Scope() ctx = core.DeviceContext.create(place) @@ -120,7 +137,7 @@ class GradientChecker(unittest.TestCase): out_names = [item for k in outputs for item in outputs[k]] # create input var and set value - for name, value in input_vars.iteritems(): + for name, value in input_value.iteritems(): if name not in in_names: raise ValueError(name + "does not exist in Op's inputs.") var = scope.new_var(name).get_tensor() @@ -154,7 +171,16 @@ class GradientChecker(unittest.TestCase): ] return outs - def compare_grad(self, forward_op, inputs): + def compare_grad(self, forward_op, input_value): + """ Compare the input gradients between CPU and GPU for the given forward + operator. 
+ + :param forward_op: forward operator + :type forward_op: Operator + :param input_value: input values. + :type input_value: dict{string:numpy.array} + :raises: AssertionError, there is different gradient value. + """ backward_op = core.Operator.backward(forward_op, set()) # return if not compile with GPU or not implementing GPU kernel if not (core.is_compile_gpu() and backward_op.support_gpu()): @@ -162,19 +188,31 @@ class GradientChecker(unittest.TestCase): outputs = backward_op.outputs() out_names = [item for k in outputs for item in outputs[k]] - cpu_grads = self.get_grad(forward_op, backward_op, inputs, out_names, - core.CPUPlace()) - gpu_grads = self.get_grad(forward_op, backward_op, inputs, out_names, - core.GPUPlace(0)) + cpu_grads = self.get_grad(forward_op, backward_op, input_value, + out_names, core.CPUPlace()) + gpu_grads = self.get_grad(forward_op, backward_op, input_value, + out_names, core.GPUPlace(0)) for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, out_names): self.assertTrue( - numpy.allclose(c_grad, g_grad), + numpy.allclose( + c_grad, g_grad, atol=1e-4), "output name: " + name + " has diff") - def assert_is_close(self, numeric_grads, analytic_grads, names, - max_relative_error, msg_prefix): + def __assert_is_close(self, numeric_grads, analytic_grads, names, + max_relative_error, msg_prefix): + """Use relative error for the comparison. + + :param numeric_grads: the numerical graidents. + :type numeric_grads: a list of numpy.array + :param analytic_grads: the analytical graidents. + :type analytic_grads: a list of numpy.array + :param name: the names of gradients, used to print for debug. + :type names: a list of string + :param msg_prefix: string info, used to print for debug. + :type msf_prefix: string + """ for a, b, name in itertools.izip(numeric_grads, analytic_grads, names): abs_a = numpy.abs(a) # if abs_a is nearly zero, then use abs error for a, not relative @@ -241,6 +279,6 @@ class GradientChecker(unittest.TestCase): # get analytical gradients according to different device analytic_grads = self.get_grad(forward_op, backward_op, input_vars, check_names, place) - self.assert_is_close(numeric_grads, analytic_grads, check_names, - max_relative_error, - "Gradient Check On %s" % str(place)) + self.__assert_is_close(numeric_grads, analytic_grads, check_names, + max_relative_error, + "Gradient Check On %s" % str(place)) From e08651f9b5a27db3ff3992ecdcd8bd5cb0cf12e2 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 17 Aug 2017 13:57:23 +0800 Subject: [PATCH 85/92] remove flag use_mkldnn_wgt --- paddle/gserver/layers/MKLDNNFcLayer.cpp | 8 ++++++-- paddle/gserver/tests/MKLDNNTester.cpp | 27 ++++++++++++++++++------- paddle/gserver/tests/MKLDNNTester.h | 2 +- paddle/trainer/TrainerConfigHelper.cpp | 2 -- paddle/utils/Flags.cpp | 1 - paddle/utils/Flags.h | 1 - 6 files changed, 27 insertions(+), 14 deletions(-) diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/gserver/layers/MKLDNNFcLayer.cpp index 30f567eaf8..d201fac65e 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/gserver/layers/MKLDNNFcLayer.cpp @@ -57,11 +57,14 @@ bool MKLDNNFcLayer::init(const LayerMap& layerMap, } void MKLDNNFcLayer::convertWeightsFromPaddle() { - if (FLAGS_use_mkldnn_wgt) { + if (hasInitedWgt_) { return; } - if (hasInitedWgt_) { + // TODO(TJ): dst format should get from wgtVal_ + int dstFmt = PARAM_FORMAT_MKLDNN_OI; + int srcFmt = weight_->getParameterPtr()->getHeaderFormat(); + if (srcFmt == dstFmt) { return; } @@ -78,6 +81,7 @@ void 
MKLDNNFcLayer::convertWeightsFromPaddle() { MatrixPtr paddleWgtT; paddleWgt->transpose(paddleWgtT, true); weight_->getW()->copyFrom(*paddleWgtT); + weight_->getParameterPtr()->setHeaderFormat(dstFmt); hasInitedWgt_ = true; } diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp index 99c8c4948c..d20215571d 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/gserver/tests/MKLDNNTester.cpp @@ -330,9 +330,7 @@ void MKLDNNTester::run(const TestConfig& dnn, log_ = log; lvl_ = level; - // Firstly test FLAGS_use_mkldnn_wgt = false - FLAGS_use_mkldnn_wgt = false; - // reset and run once + // Firstly test mkldnn init from PARAM_FORMAT_ORIGINAL weight reset(dnn, ref, batchSize); randomWgtDatas(); clearWgtDiffs(); @@ -342,17 +340,32 @@ void MKLDNNTester::run(const TestConfig& dnn, runOnce(); } - // Then test FLAGS_use_mkldnn_wgt = true - FLAGS_use_mkldnn_wgt = true; - // after run once the mkldnn weight has been stored in dnnlayer + if (parameters_[DNN].empty()) { + // has no paramters + return; + } + + // After run some iters, the mkldnn weight has been stored in dnnLayer + // and we can also get the mkldnn weight paramter header format + // Weight param should always be index 0 (and bias index 1). + // TODO(TJ): should also considerate mean and var format when batchnorm ready + int dnnWgtFmt = parameters_[DNN][0]->getHeaderFormat(); + int refWgtFmt = parameters_[REF][0]->getHeaderFormat(); + if (dnnWgtFmt == refWgtFmt) { + // weight format are equal, so no need check more + return; + } + // then save the weights and restart again vector dnnWgts, refWgts; CHECK_EQ(parameters_[DNN].size(), parameters_[REF].size()); saveWgt(parameters_[DNN], dnnWgts); saveWgt(parameters_[REF], refWgts); - // restart again with flag true + // restart again with dnn weight format reset(dnn, ref, batchSize); + // TODO(TJ): should also considerate mean and var format when batchnorm ready + parameters_[DNN][0]->setHeaderFormat(dnnWgtFmt); // restore wgt restoreWgt(dnnWgts, parameters_[DNN]); diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/gserver/tests/MKLDNNTester.h index 522eeaf24b..e55e4493ff 100644 --- a/paddle/gserver/tests/MKLDNNTester.h +++ b/paddle/gserver/tests/MKLDNNTester.h @@ -108,7 +108,7 @@ private: * if many(>failRate) wrong(abs(dnn-ref)/abs(ref)>thres) points return the * max(diff/ref) * else return sum(abs(a-b)) / sum(abs(b)) - * The return value should smaller than eps when passing. + * The return value should be smaller than eps when passing. 
*/ double getDelta(const real* d1, const real* d2, diff --git a/paddle/trainer/TrainerConfigHelper.cpp b/paddle/trainer/TrainerConfigHelper.cpp index eba40862b9..a0a365aa0b 100644 --- a/paddle/trainer/TrainerConfigHelper.cpp +++ b/paddle/trainer/TrainerConfigHelper.cpp @@ -29,7 +29,6 @@ DECLARE_bool(with_gpu); DECLARE_bool(parallel_nn); DECLARE_string(config_args); DECLARE_bool(use_mkldnn); -DECLARE_bool(use_mkldnn_wgt); const char *kConfigParserModuleName = "paddle.trainer.config_parser"; const char *kConfigParserFuncName = "parse_config_and_serialize"; @@ -47,7 +46,6 @@ TrainerConfigHelper::TrainerConfigHelper(const std::string &configFilePath) << ",with_cost=" << FLAGS_with_cost << ",use_gpu=" << FLAGS_use_gpu << ",parallel_nn=" << FLAGS_parallel_nn << ",use_mkldnn=" << FLAGS_use_mkldnn - << ",use_mkldnn_wgt=" << FLAGS_use_mkldnn_wgt << ",cudnn_version=" << hl_get_cudnn_lib_version(); if (!FLAGS_config_args.empty()) { configArgs << "," << FLAGS_config_args; diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index 600c83a848..ab1c181c62 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -27,7 +27,6 @@ DEFINE_bool(use_mkldnn, false, "Default still keep use CPU training"); DEFINE_bool(use_mkldnn, false, "Only support CPU training"); #endif -DEFINE_bool(use_mkldnn_wgt, false, "Init weight from CPU weight"); DEFINE_bool(parallel_nn, false, "Whether to use multi-threads to calculate one neural network." diff --git a/paddle/utils/Flags.h b/paddle/utils/Flags.h index 0aca4c0ee0..1832bb515e 100644 --- a/paddle/utils/Flags.h +++ b/paddle/utils/Flags.h @@ -41,4 +41,3 @@ DECLARE_string(predict_file); DECLARE_bool(prev_batch_state); DECLARE_string(init_model_path); DECLARE_bool(use_mkldnn); -DECLARE_bool(use_mkldnn_wgt); From 47f380bb4786f93aa95da809a8d7f18d862b78ca Mon Sep 17 00:00:00 2001 From: Yancey Date: Thu, 17 Aug 2017 14:16:04 +0800 Subject: [PATCH 86/92] fix ldconfig (#3547) --- paddle/scripts/docker/build.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 7c12664aed..2941662f34 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -146,7 +146,8 @@ RUN apt-get update &&\ pip install /*.whl; apt-get install -f -y && \ apt-get clean -y && \ rm -f /*.whl && \ - paddle version + paddle version && \ + ldconfig ${DOCKERFILE_CUDNN_DSO} ${DOCKERFILE_GPU_ENV} ADD go/cmd/pserver/pserver /usr/bin/ From 5181aefc6bf6d1af1a769879f8cddc9ae9bc2a20 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Thu, 17 Aug 2017 14:18:51 +0800 Subject: [PATCH 87/92] tune max relative error for sigmoid op unit test. --- paddle/operators/sigmoid_op.h | 2 +- python/paddle/v2/framework/tests/gradient_checker.py | 12 ++++++------ python/paddle/v2/framework/tests/test_sigmoid_op.py | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/paddle/operators/sigmoid_op.h b/paddle/operators/sigmoid_op.h index 11ab923eb3..b01a9b3f23 100644 --- a/paddle/operators/sigmoid_op.h +++ b/paddle/operators/sigmoid_op.h @@ -37,7 +37,7 @@ class SigmoidKernel : public framework::OpKernel { auto Y = EigenVector::Flatten(*output); auto place = context.GetEigenDevice(); - Y.device(place) = 1.0 / (1.0 + (-1.0 * X).exp()); + Y.device(place) = 1. / (1. 
+ (-X).exp()); } }; diff --git a/python/paddle/v2/framework/tests/gradient_checker.py b/python/paddle/v2/framework/tests/gradient_checker.py index 2c92dfa43e..12f302fe25 100644 --- a/python/paddle/v2/framework/tests/gradient_checker.py +++ b/python/paddle/v2/framework/tests/gradient_checker.py @@ -188,10 +188,10 @@ class GradientChecker(unittest.TestCase): outputs = backward_op.outputs() out_names = [item for k in outputs for item in outputs[k]] - cpu_grads = self.get_grad(forward_op, backward_op, input_value, - out_names, core.CPUPlace()) - gpu_grads = self.get_grad(forward_op, backward_op, input_value, - out_names, core.GPUPlace(0)) + cpu_grads = self.__get_gradient(forward_op, backward_op, input_value, + out_names, core.CPUPlace()) + gpu_grads = self.__get_gradient(forward_op, backward_op, input_value, + out_names, core.GPUPlace(0)) for c_grad, g_grad, name in itertools.izip(cpu_grads, gpu_grads, out_names): @@ -277,8 +277,8 @@ class GradientChecker(unittest.TestCase): check_names = [grad_var_name(name) for name in inputs_to_check] for place in places: # get analytical gradients according to different device - analytic_grads = self.get_grad(forward_op, backward_op, input_vars, - check_names, place) + analytic_grads = self.__get_gradient(forward_op, backward_op, + input_vars, check_names, place) self.__assert_is_close(numeric_grads, analytic_grads, check_names, max_relative_error, "Gradient Check On %s" % str(place)) diff --git a/python/paddle/v2/framework/tests/test_sigmoid_op.py b/python/paddle/v2/framework/tests/test_sigmoid_op.py index c3bd79f5dc..273c2e5ab1 100644 --- a/python/paddle/v2/framework/tests/test_sigmoid_op.py +++ b/python/paddle/v2/framework/tests/test_sigmoid_op.py @@ -14,14 +14,14 @@ class TestSigmoidOp(unittest.TestCase): class TestSigmoidGradOp(GradientChecker): - def test_compare_grad(self): + def test_grad(self): op = create_op("sigmoid") - inputs = {"X": np.random.random((11, 17)).astype("float32")} + inputs = {"X": np.random.uniform(0.1, 1, [11, 17]).astype("float32")} # compare gpu and cpu results for backward op. - # skip this test if only compiling CPU version. + # this test will be skiped if only compiling CPU version. 
From 1365f2d15e6f1e02592a5cf5b5f5d07a0eb7f99c Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Thu, 17 Aug 2017 14:37:03 +0800
Subject: [PATCH 88/92] Remove R-Value reference in AddOp

Fit Google C++ Style

---
 paddle/operators/net_op.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/operators/net_op.h b/paddle/operators/net_op.h
index e8720c9609..885ac6eeca 100644
--- a/paddle/operators/net_op.h
+++ b/paddle/operators/net_op.h
@@ -89,13 +89,13 @@ class NetOp : public framework::OperatorBase {
   /**
    * @brief Add an operator by ptr
    */
-  void AddOp(std::unique_ptr<framework::OperatorBase>&& op) {
+  void AddOp(std::unique_ptr<framework::OperatorBase> op) {
     PADDLE_ENFORCE(!add_op_done_, "Cannot AddOp when this network is sealed");
     PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op");
     ops_.push_back(std::move(op));
   }
 
-  void InsertOp(size_t pos, std::unique_ptr<framework::OperatorBase>&& op) {
+  void InsertOp(size_t pos, std::unique_ptr<framework::OperatorBase> op) {
     PADDLE_ENFORCE(!add_op_done_,
                    "Cannot InsertOp when this network is sealed");
     PADDLE_ENFORCE_NOT_NULL(op, "Cannot Insert Null op");
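
Patch 88 changes AddOp and InsertOp to take std::unique_ptr by value rather than by rvalue reference, the usual "sink argument" style. The sketch below is illustrative only (Net, Op, and the call sites are invented); it shows that a by-value unique_ptr parameter still forces callers to std::move, while ownership clearly ends up inside the callee.

    // Illustrative sketch of a unique_ptr "sink" parameter (invented names).
    #include <memory>
    #include <utility>
    #include <vector>

    struct Op {
      virtual ~Op() = default;
    };

    class Net {
     public:
      // Ownership is transferred into `op` at the call site.
      void AddOp(std::unique_ptr<Op> op) { ops_.push_back(std::move(op)); }

     private:
      std::vector<std::unique_ptr<Op>> ops_;
    };

    int main() {
      Net net;
      auto op = std::make_unique<Op>();
      net.AddOp(std::move(op));           // explicit transfer of ownership
      net.AddOp(std::make_unique<Op>());  // temporaries bind directly
      return 0;
    }
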
From 94b58a29d6613f528076269d1332ad9d2f43ec67 Mon Sep 17 00:00:00 2001
From: liaogang
Date: Thu, 17 Aug 2017 14:58:21 +0800
Subject: [PATCH 89/92] Follow comments

---
 paddle/memory/memory.cc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index dfe9f16f74..c99cc54156 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -30,8 +30,7 @@ std::once_flag cpu_allocator_flag;
 std::once_flag gpu_allocator_flag;
 
 BuddyAllocator* GetCPUBuddyAllocator() {
-  static std::unique_ptr<BuddyAllocator, void (*)(BuddyAllocator*)> a{
-      nullptr, [](BuddyAllocator* p) { delete p; }};
+  static std::unique_ptr<BuddyAllocator> a{nullptr};
 
   std::call_once(cpu_allocator_flag, [&]() {
     a.reset(new BuddyAllocator(new detail::CPUAllocator,

From 017a3818dee89ec1cd2b73b31ced9f6c51a12c8e Mon Sep 17 00:00:00 2001
From: liaogang
Date: Thu, 17 Aug 2017 16:38:15 +0800
Subject: [PATCH 90/92] Add memory.h for unique_ptr

---
 paddle/memory/memory.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index c99cc54156..0266bf4f7d 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -16,6 +16,7 @@ limitations under the License. */
 
 #include <algorithm>  // for transform
 #include <cstring>    // for memcpy
+#include <memory>     // for unique_ptr
 #include <mutex>      // for call_once
 
 #include "paddle/memory/detail/buddy_allocator.h"

From 7f8c3f82145dd02cf7d136f27de42a6f0a56024b Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Thu, 17 Aug 2017 18:02:20 +0800
Subject: [PATCH 91/92] Add MeanOp's Gradient Test And Fix Mean Op Gradient

---
 paddle/operators/mean_op.h                       | 3 ++-
 python/paddle/v2/framework/tests/test_mean_op.py | 8 ++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/paddle/operators/mean_op.h b/paddle/operators/mean_op.h
index fcb703e63b..9848af280b 100644
--- a/paddle/operators/mean_op.h
+++ b/paddle/operators/mean_op.h
@@ -55,9 +55,10 @@ class MeanGradKernel : public framework::OpKernel {
     IG->mutable_data<T>(context.GetPlace());
 
     T ig_size = (T)framework::product(IG->dims());
+    Eigen::DSizes<int, 1> bcast(ig_size);
 
     EigenVector<T>::Flatten(*IG).device(context.GetEigenDevice<Place>()) =
-        EigenScalar<T>::From(*OG) / ig_size;
+        (EigenVector<T>::From(*OG) / ig_size).broadcast(bcast);
   }
 };
 
diff --git a/python/paddle/v2/framework/tests/test_mean_op.py b/python/paddle/v2/framework/tests/test_mean_op.py
index b5d52b9056..f32b3160d6 100644
--- a/python/paddle/v2/framework/tests/test_mean_op.py
+++ b/python/paddle/v2/framework/tests/test_mean_op.py
@@ -1,5 +1,6 @@
 import unittest
 from op_test_util import OpTestMeta
+from gradient_checker import GradientChecker, create_op
 import numpy as np
 
 
@@ -12,5 +13,12 @@ class TestMeanOp(unittest.TestCase):
         self.outputs = {'Out': np.mean(self.inputs['X'])}
 
 
+class MeanGradOpTest(GradientChecker):
+    def test_normal(self):
+        op = create_op("mean")
+        inputs = {"X": np.random.random((10, 10)).astype("float32")}
+        self.check_grad(op, inputs, set("X"), "Out")
+
+
 if __name__ == '__main__':
     unittest.main()
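
Patch 91 fixes MeanGradKernel to broadcast OG / N over the whole input gradient instead of assigning a scalar. A minimal stand-in, not part of the patch and using invented names, makes the arithmetic explicit: for out = mean(X) over N elements, every element of dX receives dOut / N, so the per-element gradients sum back to dOut.

    // Illustrative sketch of the mean op's gradient, plain C++ with no Eigen.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const std::size_t n = 8;   // number of elements in X
      const float d_out = 2.0f;  // incoming gradient of the scalar mean
      std::vector<float> d_x(n);
      for (std::size_t i = 0; i < n; ++i) {
        d_x[i] = d_out / static_cast<float>(n);  // same value broadcast to all
      }
      float sum = 0.0f;
      for (float g : d_x) sum += g;
      // Each element contributes 1/N to the mean, so the per-element
      // gradients sum back to the incoming gradient.
      std::printf("d_x[0] = %.4f, sum(d_x) = %.4f (expected %.4f)\n", d_x[0],
                  sum, d_out);
      return 0;
    }
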
From 07d16e3e135826407f76b860faccd9c3babf40c4 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Thu, 17 Aug 2017 19:21:29 +0800
Subject: [PATCH 92/92] refine comments

---
 paddle/gserver/tests/MKLDNNTester.cpp |  8 ++++----
 paddle/parameter/Parameter.h          | 22 +++++++++++++++-------
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/gserver/tests/MKLDNNTester.cpp
index d20215571d..de1635be2a 100644
--- a/paddle/gserver/tests/MKLDNNTester.cpp
+++ b/paddle/gserver/tests/MKLDNNTester.cpp
@@ -345,10 +345,10 @@ void MKLDNNTester::run(const TestConfig& dnn,
     return;
   }
 
-  // After run some iters, the mkldnn weight has been stored in dnnLayer
-  // and we can also get the mkldnn weight paramter header format
-  // Weight param should always be index 0 (and bias index 1).
-  // TODO(TJ): should also considerate mean and var format when batchnorm ready
+  // After run some iterations, the mkldnn weight has been stored in dnnLayer
+  // and we can also get the mkldnn weight parameter header format.
+  // Weight parameter should always be index 0 (and bias index 1).
+  // TODO(TJ): should also consider mean and var format when batchnorm ready
   int dnnWgtFmt = parameters_[DNN][0]->getHeaderFormat();
   int refWgtFmt = parameters_[REF][0]->getHeaderFormat();
   if (dnnWgtFmt == refWgtFmt) {
diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h
index cffd3aa92e..e31cbc3dee 100644
--- a/paddle/parameter/Parameter.h
+++ b/paddle/parameter/Parameter.h
@@ -35,9 +35,17 @@ limitations under the License. */
 namespace paddle {
 
 typedef enum {
-  PARAM_FORMAT_ORIGINAL = 0,  // the paddle original basic format
-  PARAM_FORMAT_MKLDNN_OI,     // the mkldnn format oi
-  PARAM_FORMAT_ITEMS,         // the total format items numbers
+  /// The paddle original basic format
+  PARAM_FORMAT_ORIGINAL = 0,
+
+  /// See mkldnn_memory_format_t in
+  /// https://github.com/01org/mkl-dnn/blob/master/include/mkldnn_types.h
+  /// for a detailed description.
+  /// 2D weights tensor in the format (output channels, input channels).
+  PARAM_FORMAT_MKLDNN_OI,
+
+  /// The total format items numbers
+  PARAM_FORMAT_ITEMS,
 } PARAM_FORMAT;
 
 class SparsePrefetchRowCpuMatrix;
@@ -256,19 +264,19 @@ public:
   };
 
   /**
-   * @brief Is the header supported
+   * @brief Is the header format supported.
    */
  static bool isHeaderFormatSupported(int32_t fmt) {
     return fmt < PARAM_FORMAT_ITEMS;
   }
 
   /**
-   * @brief Get the format in header
+   * @brief Get the format in header.
    */
   int getHeaderFormat() { return headerFormat_; }
 
   /**
-   * @brief Set the format in header
+   * @brief Set the format in header.
    */
   void setHeaderFormat(int32_t fmt) { headerFormat_ = fmt; }
 
@@ -343,7 +351,7 @@ protected:
   bool updated_;
   SparseFormat format_;
 
-  // The header format for saving or loading param
+  /// The header format for saving or loading param
   int32_t headerFormat_;
 
   std::vector<std::shared_ptr<IParameterUpdaterHook>> updaterHooks_;
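
The refined comment in Parameter.h pins down PARAM_FORMAT_MKLDNN_OI as a 2D weight laid out as (output channels, input channels). The sketch below is illustrative only and not part of the patch; it assumes, purely for the example, that the original layout is the transposed (input, output) order, and shows the index shuffle such a conversion implies, alongside a header-format tag like the one getHeaderFormat()/setHeaderFormat() track. All names are invented.

    // Illustrative sketch of converting a weight matrix into an OI layout.
    #include <cstdio>
    #include <vector>

    enum ParamFormat { kOriginal = 0, kMkldnnOI = 1 };

    // Row-major (in x out) -> row-major (out x in).
    std::vector<float> ToOI(const std::vector<float>& w, int in, int out) {
      std::vector<float> oi(w.size());
      for (int i = 0; i < in; ++i)
        for (int o = 0; o < out; ++o) oi[o * in + i] = w[i * out + o];
      return oi;
    }

    int main() {
      const int in = 3, out = 2;
      std::vector<float> w = {1, 2, 3, 4, 5, 6};  // (in x out), row-major
      std::vector<float> oi = ToOI(w, in, out);
      int format = kMkldnnOI;  // would be recorded in the parameter header
      std::printf("format=%d, first OI row: %.0f %.0f %.0f\n", format, oi[0],
                  oi[1], oi[2]);
      return 0;
    }
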