From b6dc3a59f1712569ad8a19dd63bb536af8c56f57 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 28 Jun 2018 12:27:25 +0800 Subject: [PATCH 01/34] Add DataBalanceOpHandle to MultiDeviceSSAGragh --- paddle/fluid/framework/details/CMakeLists.txt | 3 +- .../details/data_balance_op_handle.cc | 138 ++++++++++++++++++ .../details/data_balance_op_handle.h | 50 +++++++ .../details/multi_devices_graph_builder.cc | 27 +++- .../details/multi_devices_graph_builder.h | 3 + paddle/fluid/framework/lod_tensor.cc | 3 +- paddle/fluid/operators/read_op.cc | 10 +- 7 files changed, 229 insertions(+), 5 deletions(-) create mode 100644 paddle/fluid/framework/details/data_balance_op_handle.cc create mode 100644 paddle/fluid/framework/details/data_balance_op_handle.h diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 3c73b6cc55..4fb4ec38ee 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -25,11 +25,12 @@ else() cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) endif() +cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_base scope lod_tensor) cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle - scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle) + scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle) cc_library(ssa_graph_builder_factory SRCS ssa_graph_builder_factory.cc DEPS multi_devices_graph_builder ssa_graph_printer ssa_graph_checker) diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc new file mode 100644 index 0000000000..786d95acb1 --- /dev/null +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -0,0 +1,138 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/framework/details/data_balance_op_handle.h" +#include +#include "paddle/fluid/framework/details/container_cast.h" + +namespace paddle { +namespace framework { +namespace details { + +DataBalanceOpHandle::DataBalanceOpHandle( + const std::vector &local_scopes, + const std::vector &places) + : local_scopes_(local_scopes), places_(places) {} + +std::string DataBalanceOpHandle::Name() const { return "data balance"; } + +std::vector> DataBalanceOpHandle::GetBalancePlan( + const std::vector &device_sizes) { + int device_num = device_sizes.size(); + int total_size = 0; + int empty_num = 0; + std::vector> size_device_vec; + size_device_vec.reserve(device_num); + for (int i = 0; i < device_num; ++i) { + if (device_sizes[i] == 0) { + ++empty_num; + } + total_size += device_sizes[i]; + size_device_vec.push_back({{device_sizes[i], i}}); + } + std::vector> res; + if (empty_num == 0) { + // No need to do data balance. + return res; + } + if (total_size < device_num) { + // No enough data. + PADDLE_THROW("There is no next data."); + } + std::sort(size_device_vec.begin(), size_device_vec.end(), + [](const std::array &a, const std::array &b) { + return a[0] > b[0]; + }); + int expected_device_size = total_size / device_num; + int src_idx = 0; + for (int dst_idx = device_num - empty_num; dst_idx < device_num; ++dst_idx) { + if (size_device_vec[src_idx][0] <= expected_device_size) { + ++src_idx; + PADDLE_ENFORCE_LT(src_idx, device_num - empty_num); + } + size_device_vec[src_idx][0] -= expected_device_size; + size_device_vec[dst_idx][0] += expected_device_size; + res.push_back({{size_device_vec[src_idx][1], size_device_vec[dst_idx][1], + expected_device_size}}); + } + return res; +} + +void DataBalanceOpHandle::RunImpl() { + if (places_.size() == 1) { + return; + } + auto in_var_handles = DynamicCast(inputs_); + auto out_var_handles = DynamicCast(outputs_); + PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0); + PADDLE_ENFORCE_EQ( + in_var_handles.size(), out_var_handles.size(), + "The NoDummyInputSize and NoDummyOutputSize should be equal."); + int data_num = in_var_handles.size() / places_.size(); + WaitInputVarGenerated(); + + std::vector> lod_tensors; + std::vector device_sizes; + for (int i = 0; i < static_cast(in_var_handles.size()); ++i) { + PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_, + "The name of input and output should be equal."); + int place_idx = i / data_num; + int data_idx = i % data_num; + auto *local_scope = + local_scopes_[place_idx]->FindVar(kLocalExecScopeName)->Get(); + auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name_); + PADDLE_ENFORCE(tensor_var->IsType()); + auto *tensor = tensor_var->GetMutable(); + PADDLE_ENFORCE(places_[place_idx] == tensor->place()); + lod_tensors[data_idx].push_back(tensor); + int ins_size = + tensor->lod().empty() ? tensor->dims()[0] : tensor->NumElements(); + if (data_idx == 0) { + device_sizes.emplace_back(ins_size); + } else { + PADDLE_ENFORCE_EQ(ins_size, device_sizes.at(place_idx)); + } + } + const auto &balance_plan = GetBalancePlan(device_sizes); + + for (const auto &trans : balance_plan) { + for (int data_idx = 0; data_idx < data_num; ++data_idx) { + LoDTensor *src_tensor = lod_tensors[data_idx][trans[0]]; + LoDTensor *dst_tensor = lod_tensors[data_idx][trans[1]]; + int trans_ins_size = trans[2]; + LoD src_lod = src_tensor->lod(); + int src_ins_size = + src_lod.empty() ? 
src_tensor->dims()[0] : src_tensor->NumElements(); + int cut_point = src_ins_size - trans_ins_size; + if (!src_lod.empty()) { + for (auto &level : src_lod) { + cut_point = level[cut_point]; + } + } + TensorCopySync(src_tensor->Slice(cut_point, src_tensor->dims()[0]), + dst_tensor->place(), dst_tensor); + src_tensor->ShareDataWith(src_tensor->Slice(0, cut_point)); + if (!src_lod.empty()) { + dst_tensor->set_lod(SliceInLevel( + src_lod, 0, src_ins_size - trans_ins_size, src_ins_size)); + src_tensor->set_lod( + SliceInLevel(src_lod, 0, 0, src_ins_size - trans_ins_size)); + } + } + } +} + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h new file mode 100644 index 0000000000..00bc4837d6 --- /dev/null +++ b/paddle/fluid/framework/details/data_balance_op_handle.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" + +namespace paddle { +namespace framework { +namespace details { + +struct DataBalanceOpHandle : public OpHandleBase { + public: + DataBalanceOpHandle(const std::vector &local_scopes, + const std::vector &places); + + std::string Name() const override; + + bool IsMultiDeviceTransfer() override { return false; }; + + protected: + void RunImpl() override; + + private: + // std::vector<(src_dev_id, dst_dev_id, trans_size)> + std::vector> GetBalancePlan( + const std::vector &batch_size_per_device); + + const std::vector &local_scopes_; + const std::vector &places_; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index e7063fb042..357f6ff5dc 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -20,6 +20,7 @@ #include "paddle/fluid/framework/details/all_reduce_op_handle.h" #include "paddle/fluid/framework/details/broadcast_op_handle.h" #include "paddle/fluid/framework/details/computation_op_handle.h" +#include "paddle/fluid/framework/details/data_balance_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_graph_builder.h" #include "paddle/fluid/framework/details/reduce_op_handle.h" #include "paddle/fluid/framework/details/rpc_op_handle.h" @@ -217,6 +218,11 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( // gradients. 
CreateComputationalOps(&result, *op, places_.size()); + if (op->Type() == "read") { + const auto &data_var_names = op->Output("Out"); + InsertDataBalanceOp(&result, data_var_names); + } + if (!is_forwarding && places_.size() > 1) { // Currently, we assume that once gradient is generated, it can be // broadcast, and each gradient is only broadcast once. @@ -360,6 +366,24 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(SSAGraph *result, } } +void MultiDevSSAGraphBuilder::InsertDataBalanceOp( + SSAGraph *result, const std::vector &datas) const { + result->ops_.emplace_back(new DataBalanceOpHandle(local_scopes_, places_)); + auto *op_handle = result->ops_.back().get(); + for (size_t i = 0; i < places_.size(); ++i) { + auto &p = places_[i]; + SetCommunicationContext(op_handle, p); + for (const std::string &d_name : datas) { + auto &vars = result->vars_[i][d_name]; + PADDLE_ENFORCE(!vars.empty()); + op_handle->AddInput(vars.back().get()); + auto var = new VarHandle(vars.size(), i, d_name, p); + vars.emplace_back(var); + op_handle->AddOutput(var); + } + } +} + bool MultiDevSSAGraphBuilder::IsParameterGradientOnce( const std::string &og, std::unordered_set *og_has_been_broadcast) const { @@ -509,7 +533,8 @@ void MultiDevSSAGraphBuilder::CreateRPCOp(SSAGraph *result, op_dev_id = GetVarDeviceID(op.InputArgumentNames()[0]); // the variable name which contains .block means it was splited by // split_byref op - // so that we can balance the variable blocks to all the pserver instances. + // so that we can balance the variable blocks to all the pserver + // instances. if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce && op.InputArgumentNames()[0].find(".block") == std::string::npos) { op_dev_id = GetAppropriateDeviceID(op.InputArgumentNames()); diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h index 0b6347bf51..a964e02488 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -101,6 +101,9 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { void InsertAllReduceOp(SSAGraph *result, const std::string &og) const; + void InsertDataBalanceOp(SSAGraph *result, + const std::vector &datas) const; + void CreateBroadcastOp(SSAGraph *result, const std::string &p_name, size_t src_dev_id) const; diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index d29d8ce1c5..49672e1181 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -68,7 +68,7 @@ std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { // only print first ten elements int64_t size = t.numel() < 10 ? 
t.numel() : 10; for (int64_t i = 0; i < size; ++i) { - if (t.type().hash_code() == typeid(float).hash_code()) { + if (t.type().hash_code() == typeid(float).hash_code()) { // NOLINT os << t.data()[i] << " "; } else if (t.type().hash_code() == typeid(int64_t).hash_code()) { os << t.data()[i] << " "; @@ -89,6 +89,7 @@ std::string LoDToString(const LoD &lod) { LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { PADDLE_ENFORCE_LT(level, in.size()); + PADDLE_ENFORCE_LT(elem_begin, elem_end); PADDLE_ENFORCE_LT(elem_end, in[level].size()); LoD res; diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 72a27d4358..8e9f91c185 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -66,9 +66,15 @@ class ReadOp : public framework::OperatorBase { std::vector out_arg_names = Outputs("Out"); std::vector ins; reader->ReadNext(&ins); - PADDLE_ENFORCE(!ins.empty(), "There is no next data."); + if (ins.empty()) { + ins.resize(out_arg_names.size()); + for (auto& tensor : ins) { + // data type is not important for subsequent DataBalanceOpHandle + tensor.mutable_data(framework::make_ddim({0}), dev_place); + } + } PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); - for (size_t i = 0; i < ins.size(); ++i) { + for (size_t i = 0; i < out_arg_names.size(); ++i) { auto* out = scope.FindVar(out_arg_names[i])->GetMutable(); out->ShareDataWith(ins[i]); From 2e320079d35e140d2c9b01f859bf386fd3cf9304 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 28 Jun 2018 12:49:41 +0000 Subject: [PATCH 02/34] fix bugs --- paddle/fluid/framework/details/data_balance_op_handle.cc | 4 +--- paddle/fluid/framework/details/data_balance_op_handle.h | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 786d95acb1..24a68506e8 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -81,8 +81,7 @@ void DataBalanceOpHandle::RunImpl() { "The NoDummyInputSize and NoDummyOutputSize should be equal."); int data_num = in_var_handles.size() / places_.size(); WaitInputVarGenerated(); - - std::vector> lod_tensors; + std::vector> lod_tensors(data_num); std::vector device_sizes; for (int i = 0; i < static_cast(in_var_handles.size()); ++i) { PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_, @@ -105,7 +104,6 @@ void DataBalanceOpHandle::RunImpl() { } } const auto &balance_plan = GetBalancePlan(device_sizes); - for (const auto &trans : balance_plan) { for (int data_idx = 0; data_idx < data_num; ++data_idx) { LoDTensor *src_tensor = lod_tensors[data_idx][trans[0]]; diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h index 00bc4837d6..5552be2e6e 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.h +++ b/paddle/fluid/framework/details/data_balance_op_handle.h @@ -41,8 +41,8 @@ struct DataBalanceOpHandle : public OpHandleBase { std::vector> GetBalancePlan( const std::vector &batch_size_per_device); - const std::vector &local_scopes_; - const std::vector &places_; + const std::vector local_scopes_; + const std::vector places_; }; } // namespace details From 47388020a2e8e702191369f578fd558fe338d723 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 29 Jun 2018 03:42:18 +0000 Subject: [PATCH 03/34] fix bugs --- 
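Illustration (not part of this commit): the redistribution rule that DataBalanceOpHandle::GetBalancePlan introduces in PATCH 01 is easier to follow outside the SSA-graph plumbing. The Python sketch below mirrors that function's logic only; the name get_balance_plan and the sample batch sizes are invented for the example and appear nowhere in the patches.

    def get_balance_plan(device_sizes):
        # device_sizes[i]: batch size device i got from its reader. Returns a
        # list of (src_dev_id, dst_dev_id, trans_size) moves that top up the
        # devices whose batch came back empty.
        device_num = len(device_sizes)
        total_size = sum(device_sizes)
        empty_num = device_sizes.count(0)
        if empty_num == 0:
            return []  # every device already has data, nothing to balance
        if total_size < device_num:
            raise RuntimeError("There is no next data.")
        # (size, device_id) pairs, largest batches first
        size_device = sorted([[s, i] for i, s in enumerate(device_sizes)],
                             reverse=True)
        expected = total_size // device_num
        plan, src = [], 0
        for dst in range(device_num - empty_num, device_num):
            if size_device[src][0] <= expected:
                src += 1  # current donor is used up, move to the next largest
            size_device[src][0] -= expected
            size_device[dst][0] += expected
            plan.append((size_device[src][1], size_device[dst][1], expected))
        return plan

    # e.g. get_balance_plan([6, 2, 0, 0]) == [(0, 3, 2), (0, 2, 2)]: device 0
    # sends two instances to device 3 and two more to device 2, so every
    # device ends up with a batch of two.
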
.../framework/details/data_balance_op_handle.cc | 15 +++++++++++++++ .../framework/details/data_balance_op_handle.h | 11 ++++++++++- .../details/multi_devices_graph_builder.cc | 5 +++++ paddle/fluid/framework/details/op_handle_base.cc | 1 + 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 24a68506e8..023e0cdf91 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -20,10 +20,24 @@ namespace paddle { namespace framework { namespace details { +#ifdef PADDLE_WITH_CUDA +DataBalanceOpHandle::DataBalanceOpHandle( + const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap *ctxs) + : local_scopes_(local_scopes), places_(places) { + if (ctxs) { + for (auto &p : places_) { + this->dev_ctxes_[p] = ctxs->DevCtx(p); + } + } +} +#else DataBalanceOpHandle::DataBalanceOpHandle( const std::vector &local_scopes, const std::vector &places) : local_scopes_(local_scopes), places_(places) {} +#endif std::string DataBalanceOpHandle::Name() const { return "data balance"; } @@ -104,6 +118,7 @@ void DataBalanceOpHandle::RunImpl() { } } const auto &balance_plan = GetBalancePlan(device_sizes); + for (const auto &trans : balance_plan) { for (int data_idx = 0; data_idx < data_num; ++data_idx) { LoDTensor *src_tensor = lod_tensors[data_idx][trans[0]]; diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h index 5552be2e6e..a4adafdfeb 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.h +++ b/paddle/fluid/framework/details/data_balance_op_handle.h @@ -19,6 +19,9 @@ #include "paddle/fluid/framework/details/op_handle_base.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/nccl_helper.h" +#endif namespace paddle { namespace framework { @@ -26,8 +29,14 @@ namespace details { struct DataBalanceOpHandle : public OpHandleBase { public: +#ifdef PADDLE_WITH_CUDA DataBalanceOpHandle(const std::vector &local_scopes, - const std::vector &places); + const std::vector &places, + const platform::NCCLContextMap *ctxs); +#else + DataBalanceOpHandle(const std::vector &local_scopes, + const std::vector *places) +#endif std::string Name() const override; diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 4ddc1f2ddd..8a9f0b1054 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -368,7 +368,12 @@ void MultiDevSSAGraphBuilder::InsertAllReduceOp(SSAGraph *result, void MultiDevSSAGraphBuilder::InsertDataBalanceOp( SSAGraph *result, const std::vector &datas) const { +#ifdef PADDLE_WITH_CUDA + result->ops_.emplace_back( + new DataBalanceOpHandle(local_scopes_, places_, nccl_ctxs_)); +#else result->ops_.emplace_back(new DataBalanceOpHandle(local_scopes_, places_)); +#endif auto *op_handle = result->ops_.back().get(); for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 1f84c3b9e2..856124875d 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ 
b/paddle/fluid/framework/details/op_handle_base.cc @@ -60,6 +60,7 @@ void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { #ifdef PADDLE_WITH_CUDA if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) { for (auto &dev_ctx : dev_ctxes_) { + PADDLE_ENFORCE_NOT_NULL(dev_ctx.second); dev_ctx.second->Wait(); } } else { From 077434c26c78c47e9b58a1a6eaaec435bd4e188f Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 29 Jun 2018 11:47:52 +0800 Subject: [PATCH 04/34] fix CPU compile error --- paddle/fluid/framework/details/data_balance_op_handle.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h index a4adafdfeb..76a407e361 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.h +++ b/paddle/fluid/framework/details/data_balance_op_handle.h @@ -35,7 +35,7 @@ struct DataBalanceOpHandle : public OpHandleBase { const platform::NCCLContextMap *ctxs); #else DataBalanceOpHandle(const std::vector &local_scopes, - const std::vector *places) + const std::vector &places); #endif std::string Name() const override; From 15be51385e2d3707f95505d51621136c562793cf Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 29 Jun 2018 13:02:23 +0800 Subject: [PATCH 05/34] fix Mac compile errors --- .../analysis/tensorrt_subgraph_node_mark_pass.h | 13 ++++++++++--- .../inference/analysis/tensorrt_subgraph_pass.h | 9 +++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h index 6cfac55d3b..c558a6ebbd 100644 --- a/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h +++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h @@ -16,6 +16,10 @@ * This file defines TensorRTSubgraphNodeMarkPass which helps to mark the ops * that supported by TensorRT engine. */ + +#pragma once + +#include #include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/subgraph_splitter.h" @@ -30,7 +34,8 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass { public: using teller_t = SubGraphSplitter::NodeInsideSubgraphTeller; - TensorRTSubgraphNodeMarkPass(const teller_t& teller) : teller_(teller) {} + explicit TensorRTSubgraphNodeMarkPass(const teller_t& teller) + : teller_(teller) {} bool Initialize(Argument* argument) override { return true; } @@ -38,8 +43,10 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass { // sub-graph into TensorRT. void Run(DataFlowGraph* graph) override; - std::string repr() const { return "tensorrt-sub-subgraph-mark"; } - std::string description() const { return "tensorrt sub-graph mark pass"; } + std::string repr() const override { return "tensorrt-sub-subgraph-mark"; } + std::string description() const override { + return "tensorrt sub-graph mark pass"; + } Pass* CreateGraphvizDebugerPass() const override; bool Finalize() override; diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h index 11e0880695..c6741a9209 100644 --- a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h +++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/inference/analysis/node.h" #include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/subgraph_splitter.h" @@ -30,7 +31,7 @@ class TensorRTSubGraphPass : public DataFlowGraphPass { // Tell whether to transform a sub-graph into TensorRT. using NodeInsideSubgraphTeller = SubGraphFuse::NodeInsideSubgraphTeller; - TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller); + explicit TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller); bool Initialize(Argument* argument) override { return true; } @@ -40,8 +41,8 @@ class TensorRTSubGraphPass : public DataFlowGraphPass { bool Finalize() override { return true; } - std::string repr() const { return "tensorrt-sub-graph"; } - std::string description() const { return "tensorrt sub graph pass"; } + std::string repr() const override { return "tensorrt-sub-graph"; } + std::string description() const override { return "tensorrt sub graph pass"; } private: NodeInsideSubgraphTeller node_inside_subgraph_teller_; @@ -49,4 +50,4 @@ class TensorRTSubGraphPass : public DataFlowGraphPass { } // namespace analysis } // namespace inference -} // paddle +} // namespace paddle From 4b950951d3f13f6b0c3289c7eb9b7afab3794108 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 29 Jun 2018 14:37:38 +0800 Subject: [PATCH 06/34] Add unittests and fix a few bugs --- .../details/data_balance_op_handle.cc | 1 - .../framework/details/fetch_op_handle.cc | 2 +- .../details/multi_devices_graph_builder.cc | 6 +- paddle/fluid/framework/lod_tensor.cc | 1 + paddle/fluid/operators/read_op.cc | 16 +- .../tests/unittests/test_data_balance.py | 188 ++++++++++++++++++ 6 files changed, 206 insertions(+), 8 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_data_balance.py diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 023e0cdf91..f8d431ef2a 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -107,7 +107,6 @@ void DataBalanceOpHandle::RunImpl() { auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name_); PADDLE_ENFORCE(tensor_var->IsType()); auto *tensor = tensor_var->GetMutable(); - PADDLE_ENFORCE(places_[place_idx] == tensor->place()); lod_tensors[data_idx].push_back(tensor); int ins_size = tensor->lod().empty() ? tensor->dims()[0] : tensor->NumElements(); diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 224e8e1f6e..d646c94460 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -67,8 +67,8 @@ void FetchOpHandle::RunImpl() { #endif } else { tensors_[i].ShareDataWith(t); - tensors_[i].set_lod(t.lod()); } + tensors_[i].set_lod(t.lod()); } this->WaitAndMergeCPUTensors(); diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 8a9f0b1054..edfefb8231 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -216,11 +216,13 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( } else { // This op runs on all devices, and its output may have parameter's // gradients. 
- CreateComputationalOps(&result, *op, places_.size()); - if (op->Type() == "read") { + op->SetAttr("throw_eof_exp", false); + CreateComputationalOps(&result, *op, places_.size()); const auto &data_var_names = op->Output("Out"); InsertDataBalanceOp(&result, data_var_names); + } else { + CreateComputationalOps(&result, *op, places_.size()); } if (!is_forwarding && places_.size() > 1) { diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index 49672e1181..dcbd2f22fc 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -393,6 +393,7 @@ void LoDTensor::MergeLoDTensor( new_dim[0] += t->dims()[0]; auto &lod = t->lod(); + PADDLE_ENFORCE_EQ(new_lod.size(), lod.size()); for (size_t j = 0; j < lod.size(); ++j) { auto &sub_lod = new_lod[j]; auto &offset = sub_lod.back(); diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 8e9f91c185..60e4eb7576 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -67,10 +67,14 @@ class ReadOp : public framework::OperatorBase { std::vector ins; reader->ReadNext(&ins); if (ins.empty()) { - ins.resize(out_arg_names.size()); - for (auto& tensor : ins) { - // data type is not important for subsequent DataBalanceOpHandle - tensor.mutable_data(framework::make_ddim({0}), dev_place); + if (Attr("throw_eof_exp")) { + PADDLE_THROW("There is no next data."); + } else { + ins.resize(out_arg_names.size()); + for (auto& tensor : ins) { + // data type is not important for subsequent DataBalanceOpHandle + tensor.mutable_data(framework::make_ddim({0}), dev_place); + } } } PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); @@ -88,6 +92,10 @@ class ReadOpMaker : public framework::OpProtoAndCheckerMaker { void Make() override { AddInput("Reader", "(ReaderHolder) The executed reader."); AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable(); + AddAttr("throw_eof_exp", + "If set true, an exception will be thrown when the Reader " + "yields empty (which means there is no next data).") + .SetDefault(true); AddComment(R"DOC( Read Operator diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py new file mode 100644 index 0000000000..44c1adad9e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -0,0 +1,188 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import paddle.fluid as fluid +import paddle.v2 as paddle +import paddle.v2.dataset.mnist as mnist +import numpy as np + + +class TestDataBalance(unittest.TestCase): + def prepare_data(self): + def fake_data_generator(): + for n in xrange(self.total_ins_num): + yield np.ones((3, 4)) * n, n + + # Prepare data + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch( + fake_data_generator, batch_size=self.batch_size) + feeder = fluid.DataFeeder( + feed_list=[ + fluid.layers.data( + name='image', shape=[3, 4], dtype='float32'), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + self.num_batches = fluid.recordio_writer.convert_reader_to_recordio_file( + self.data_file_name, reader, feeder) + + def prepare_lod_data(self): + def fake_data_generator(): + for n in xrange(1, self.total_ins_num + 1): + d1 = (np.ones((n, 3)) * n).astype('float32') + d2 = (np.array(n).reshape((1, 1))).astype('int32') + yield d1, d2 + + # Prepare lod data + with fluid.program_guard(fluid.Program(), fluid.Program()): + with fluid.recordio_writer.create_recordio_writer( + filename=self.lod_data_file_name) as writer: + eof = False + generator = fake_data_generator() + while (not eof): + data_batch = [ + np.array([]).reshape((0, 3)), np.array([]).reshape( + (0, 1)) + ] + lod = [0] + for _ in xrange(self.batch_size): + try: + ins = generator.next() + except StopIteration: + eof = True + break + for i, d in enumerate(ins): + data_batch[i] = np.concatenate( + (data_batch[i], d), axis=0) + lod.append(lod[-1] + ins[0].shape[0]) + if data_batch[0].shape[0] > 0: + for i, d in enumerate(data_batch): + t = fluid.LoDTensor() + t.set(data_batch[i], fluid.CPUPlace()) + if i == 0: + t.set_lod([lod]) + writer.append_tensor(t) + writer.complete_append_tensor() + + def setUp(self): + self.use_cuda = fluid.core.is_compiled_with_cuda() + self.data_file_name = './data_balance_test.recordio' + self.lod_data_file_name = './data_balance_with_lod_test.recordio' + self.total_ins_num = 50 + self.batch_size = 10 + self.prepare_data() + self.prepare_lod_data() + + def main(self): + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data_reader = fluid.layers.io.open_files( + filenames=[self.data_file_name], + shapes=[[-1, 3, 4], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + if self.use_cuda: + data_reader = fluid.layers.double_buffer(data_reader) + image, label = fluid.layers.read_file(data_reader) + + place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + parallel_exe = fluid.ParallelExecutor( + use_cuda=self.use_cuda, main_program=main_prog) + + if (parallel_exe.device_count > self.batch_size): + print("WARNING: Unittest TestDataBalance skipped. 
\ + For the result is not correct when device count \ + is larger than batch size.") + exit(0) + fetch_list = [image.name, label.name] + + data_appeared = [False] * self.total_ins_num + while (True): + try: + image_val, label_val = parallel_exe.run(fetch_list, + return_numpy=True) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break + ins_num = image_val.shape[0] + broadcasted_label = np.ones( + (ins_num, 3, 4)) * label_val.reshape((ins_num, 1, 1)) + self.assertEqual(image_val.all(), broadcasted_label.all()) + for l in label_val: + self.assertFalse(data_appeared[l[0]]) + data_appeared[l[0]] = True + for i in data_appeared: + self.assertTrue(i) + + def main_lod(self): + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data_reader = fluid.layers.io.open_files( + filenames=[self.lod_data_file_name], + shapes=[[-1, 3], [-1, 1]], + lod_levels=[1, 0], + dtypes=['float32', 'int32'], + thread_num=1) + ins, label = fluid.layers.read_file(data_reader) + + place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + parallel_exe = fluid.ParallelExecutor( + use_cuda=self.use_cuda, main_program=main_prog) + + if (parallel_exe.device_count > self.batch_size): + print("WARNING: Unittest TestDataBalance skipped. \ + For the result is not correct when device count \ + is larger than batch size.") + exit(0) + fetch_list = [ins.name, label.name] + + data_appeared = [False] * self.total_ins_num + while (True): + try: + ins_tensor, label_tensor = parallel_exe.run( + fetch_list, return_numpy=False) + except fluid.core.EnforceNotMet as ex: + self.assertIn("There is no next data.", ex.message) + break + + ins_val = np.array(ins_tensor) + label_val = np.array(label_tensor) + ins_lod = ins_tensor.lod()[0] + self.assertEqual(ins_val.shape[1], 3) + self.assertEqual(label_val.shape[1], 1) + self.assertEqual(len(ins_lod) - 1, label_val.shape[0]) + for i in range(0, len(ins_lod) - 1): + ins_elem = ins_val[ins_lod[i]:ins_lod[i + 1]][:] + label_elem = label_val[i][0] + self.assertEqual(ins_elem.all(), label_elem.all()) + self.assertFalse(data_appeared[int(label_elem - 1)]) + data_appeared[int(label_elem - 1)] = True + + for i in data_appeared: + self.assertTrue(i) + + def test_all(self): + self.main() + self.main_lod() From 3606a306f2c90fe0277f02577fd321b175b365a2 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Sat, 30 Jun 2018 01:06:15 +0800 Subject: [PATCH 07/34] refine --- python/paddle/fluid/tests/unittests/test_data_balance.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py index 44c1adad9e..b558d7c2ea 100644 --- a/python/paddle/fluid/tests/unittests/test_data_balance.py +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -15,7 +15,6 @@ import unittest import paddle.fluid as fluid import paddle.v2 as paddle -import paddle.v2.dataset.mnist as mnist import numpy as np From 2547c4858cf71083c1e42e413885db40e352c165 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 30 Jun 2018 11:20:47 +0800 Subject: [PATCH 08/34] add no_random args in fluid_benchmark.py --- benchmark/fluid/args.py | 6 +++++- benchmark/fluid/fluid_benchmark.py | 10 ++++++---- benchmark/fluid/models/resnet.py | 4 ++-- 3 files changed, 13 insertions(+), 7 deletions(-) mode change 100755 => 100644 benchmark/fluid/fluid_benchmark.py diff --git 
a/benchmark/fluid/args.py b/benchmark/fluid/args.py index 99c9d79b06..a79f25ccc6 100644 --- a/benchmark/fluid/args.py +++ b/benchmark/fluid/args.py @@ -125,6 +125,10 @@ def parse_args(): parser.add_argument( '--use_inference_transpiler', action='store_true', - help='If set, uses inference transpiler to optimize the program.') + help='If set, use inference transpiler to optimize the program.') + parser.add_argument( + '--no_random', + action='store_true', + help='If set, keep the random seed and do not shuffle the data.') args = parser.parse_args() return args diff --git a/benchmark/fluid/fluid_benchmark.py b/benchmark/fluid/fluid_benchmark.py old mode 100755 new mode 100644 index dcd4d9ea95..94ea7bd6ac --- a/benchmark/fluid/fluid_benchmark.py +++ b/benchmark/fluid/fluid_benchmark.py @@ -132,10 +132,6 @@ def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc, exe.run(startup_prog) # Use inference_transpiler to speedup - if args.use_inference_transpiler: - t = fluid.InferenceTranspiler() - t.transpile(infer_prog, place) - if not args.use_reader_op: feed_var_list = [ var for var in train_prog.global_block().vars.itervalues() @@ -186,6 +182,10 @@ def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc, print("Pass: %d, Loss: %f" % (pass_id, np.mean(train_losses))), # evaluation if not args.no_test and batch_acc and not args.use_reader_op: + if args.use_inference_transpiler: + t = fluid.InferenceTranspiler() + t.transpile(infer_prog, place) + pass_test_acc = test(exe, infer_prog, test_reader, feeder, batch_acc) print(", Test Accuracy: %f" % pass_test_acc) @@ -316,6 +316,8 @@ def main(): args = parse_args() print_arguments(args) print_paddle_envs() + if args.no_random: + fluid.default_startup_program().random_seed = 1 # the unique trainer id, starting from 0, needed by trainer # only diff --git a/benchmark/fluid/models/resnet.py b/benchmark/fluid/models/resnet.py index 9ed1093c54..d44a9c07d3 100644 --- a/benchmark/fluid/models/resnet.py +++ b/benchmark/fluid/models/resnet.py @@ -197,12 +197,12 @@ def get_model(args): optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) batched_train_reader = paddle.batch( - paddle.reader.shuffle( + train_reader if args.no_random else paddle.reader.shuffle( train_reader, buf_size=5120), batch_size=args.batch_size * args.gpus, drop_last=True) batched_test_reader = paddle.batch( - train_reader, batch_size=args.batch_size, drop_last=True) + test_reader, batch_size=args.batch_size, drop_last=True) return avg_cost, inference_program, optimizer, batched_train_reader,\ batched_test_reader, batch_acc From 1366832a41785ece0480dbf5d997b80f4080af7a Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Sun, 1 Jul 2018 18:04:48 +0800 Subject: [PATCH 09/34] add dist pass barrier --- paddle/fluid/framework/executor.cc | 18 +++++++--- paddle/fluid/framework/executor.h | 9 +++-- .../operators/distributed/grpc_client.cc | 29 ++++++++++++--- .../fluid/operators/distributed/grpc_client.h | 24 ++++++++----- .../operators/distributed/request_handler.h | 3 ++ .../distributed/request_handler_impl.cc | 36 +++++++++---------- .../fluid/operators/distributed/rpc_client.cc | 2 +- .../fluid/operators/distributed/rpc_client.h | 15 +++++--- .../fluid/operators/distributed/rpc_server.cc | 22 ++++++++++-- .../fluid/operators/distributed/rpc_server.h | 8 ++++- paddle/fluid/pybind/pybind.cc | 3 +- python/paddle/fluid/executor.py | 6 ++++ 12 files changed, 128 insertions(+), 47 deletions(-) diff --git a/paddle/fluid/framework/executor.cc 
b/paddle/fluid/framework/executor.cc index ae98fccc96..87b0ff0c80 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -48,10 +48,20 @@ ExecutorPrepareContext::~ExecutorPrepareContext() { Executor::Executor(const platform::Place& place) : place_(place) {} #ifdef PADDLE_WITH_DISTRIBUTE -void Executor::Complete() { - ::paddle::operators::distributed::RPCClient::GetInstance< - ::paddle::operators::distributed::GRPCClient>() - ->SendComplete(); +void Executor::BeginPass() { + auto client = ::paddle::operators::distributed::RPCClient::GetInstance< + ::paddle::operators::distributed::GRPCClient>(); + + client->SendBeginPass(); + client->Wait(); +} + +void Executor::EndPass() { + auto client = ::paddle::operators::distributed::RPCClient::GetInstance< + ::paddle::operators::distributed::GRPCClient>(); + + client->SendEndPass(); + client->Wait(); } #endif diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 3aa5ffef69..563a4b2bb6 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -46,9 +46,14 @@ class Executor { #ifdef PADDLE_WITH_DISTRIBUTE /* - * Sending signal to pserver to mark current trainer stop. + * Sending signal to pserver to mark current pass started. */ - void Complete(); + void BeginPass(); + + /* + * Sending signal to pserver to mark current pass finished. + */ + void EndPass(); #endif /* @Brief diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc index 8228a8c5a3..d8dc667fe7 100644 --- a/paddle/fluid/operators/distributed/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -35,9 +35,17 @@ void GRPCClient::InitEventLoop() { client_thread_.reset(new std::thread(std::bind(&GRPCClient::Proceed, this))); } -void GRPCClient::SendComplete() { +void GRPCClient::SendBeginPass() { for (auto& it : channels_) { - this->AsyncSendComplete(it.first); + VLOG(3) << "send begin pass to: " it.first; + this->AsyncSendBeginPass(it.first); + } +} + +void GRPCClient::SendEndPass() { + for (auto& it : channels_) { + VLOG(3) << "send end pass to " << it.first; + this->AsyncSendEndPass(it.first); } } @@ -226,19 +234,32 @@ void GRPCClient::AsyncSendFetchBarrier(const std::string& ep, req_count_++; } -void GRPCClient::AsyncSendComplete(const std::string& ep, int64_t time_out) { +void GRPCClient::AsyncSendBeginPass(const std::string& ep, int64_t time_out) { const auto ch = GetChannel(ep); BatchBarrierProcessor* s = new BatchBarrierProcessor(ch); s->Prepare(time_out); sendrecv::VariableMessage req; - req.set_varname(COMPLETE_MESSAGE); + req.set_varname(BEGIN_PASS_MESSAGE); auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); req_count_++; } +void GRPCClient::AsyncSendEndPass(const std::string& ep, int64_t time_out) { + const auto ch = GetChannel(ep); + + FetchBarrierProcessor* s = new FetchBarrierProcessor(ch); + s->Prepare(time_out); + + sendrecv::VariableMessage req; + req.set_varname(END_PASS_MESSAGE); + auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); + req_count_++; +} + void GRPCClient::AsyncCheckpointNotify(const std::string& ep, const std::string& dir, int64_t time_out) { diff --git a/paddle/fluid/operators/distributed/grpc_client.h b/paddle/fluid/operators/distributed/grpc_client.h index 7a08f2d3a4..5dae20155e 100644 --- 
a/paddle/fluid/operators/distributed/grpc_client.h +++ b/paddle/fluid/operators/distributed/grpc_client.h @@ -77,11 +77,12 @@ class BaseProcessor { context_.reset(new grpc::ClientContext()); var_h_ = var_info; context_->set_wait_for_ready(true); - - std::chrono::system_clock::time_point deadline = - std::chrono::system_clock::now() + std::chrono::milliseconds(time_out); - - context_->set_deadline(deadline); + if (time_out) { + std::chrono::system_clock::time_point deadline = + std::chrono::system_clock::now() + + std::chrono::milliseconds(time_out); + context_->set_deadline(deadline); + } } virtual void Prepare(int64_t time_out) { @@ -214,9 +215,17 @@ class GRPCClient : public RPCClient { void AsyncCheckpointNotify(const std::string& ep, const std::string& dir, int64_t time_out = FLAGS_rpc_deadline) override; + void AsyncSendBeginPass(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) override; + + void AsyncSendEndPass(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) override; + void Wait() override; - void SendComplete() override; + void SendBeginPass() override; + + void SendEndPass() override; protected: void InitImpl() override; @@ -227,9 +236,6 @@ class GRPCClient : public RPCClient { void Proceed(); - void AsyncSendComplete(const std::string& ep, - int64_t time_out = FLAGS_rpc_deadline); - std::shared_ptr GetChannel(const std::string& ep); private: diff --git a/paddle/fluid/operators/distributed/request_handler.h b/paddle/fluid/operators/distributed/request_handler.h index 90742a201a..271306d5d2 100644 --- a/paddle/fluid/operators/distributed/request_handler.h +++ b/paddle/fluid/operators/distributed/request_handler.h @@ -37,11 +37,14 @@ constexpr char kRequestSend[] = "RequestSend"; constexpr char kRequestGet[] = "RequestGet"; constexpr char kRequestPrefetch[] = "RequestPrefetch"; constexpr char kRequestCheckpoint[] = "RequestCheckpoint"; +constexpr char kRequestPassBarrier[] = "RequestPassBarrier"; #define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" #define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV" #define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV" #define COMPLETE_MESSAGE "COMPLETE@RECV" +#define BEGIN_PASS_MESSAGE "BEGIN_PASS@RECV" +#define END_PASS_MESSAGE "END_PASS@RECV" #define CHECKPOINT_SAVE_MESSAGE "SAVE@CHECKPOINTNOTIFY" #define CHECKPOINT_LOAD_MESSAGE "LOAD@CHECKPOINTNOTIFY" diff --git a/paddle/fluid/operators/distributed/request_handler_impl.cc b/paddle/fluid/operators/distributed/request_handler_impl.cc index 163154c678..5e6bff20f5 100644 --- a/paddle/fluid/operators/distributed/request_handler_impl.cc +++ b/paddle/fluid/operators/distributed/request_handler_impl.cc @@ -55,14 +55,14 @@ bool RequestSendHandler::Handle(const std::string& varname, if (varname == BATCH_BARRIER_MESSAGE) { VLOG(3) << "sync: recv batch barrier message"; rpc_server_->IncreaseBatchBarrier(kRequestSend); - } else if (varname == COMPLETE_MESSAGE) { - VLOG(3) << "sync: recv complete message"; - rpc_server_->DecreaseClientNum(); + } else if (varname == BEGIN_PASS_MESSAGE) { + VLOG(3) << "sync: recv begin pass message"; + rpc_server_->WaitCond(kRequestSend); + rpc_server_->BeginPass(); } else { VLOG(3) << "sync: received var_name: " << varname; - if (sync_mode_) { - rpc_server_->WaitCond(kRequestSend); - } + rpc_server_->WaitCond(kRequestSend); + VLOG(3) << "sync: processing received var: " << varname; if (invar == nullptr) { LOG(ERROR) << "sync: Can not find server side var: " << varname; @@ -91,21 +91,21 @@ bool RequestGetHandler::Handle(const std::string& varname, 
framework::Variable** outvar, const std::string& out_var_name) { VLOG(4) << "RequestGetHandler:" << varname; - - if (varname != FETCH_BARRIER_MESSAGE) { - if (sync_mode_) { + if (sync_mode_) { + if (varname == FETCH_BARRIER_MESSAGE) { + VLOG(3) << "sync: recv fetch barrier message"; + rpc_server_->IncreaseBatchBarrier(kRequestGet); + } else if (varname == END_PASS_MESSAGE) { + rpc_server_->EndPass(); + } else { rpc_server_->WaitCond(kRequestGet); + *outvar = scope_->FindVar(varname); + } + } else { + if (varname != FETCH_BARRIER_MESSAGE && varname != END_PASS_MESSAGE) { + *outvar = scope_->FindVar(varname); } - *outvar = scope_->FindVar(varname); - return true; - } - - // FETCH_BARRIER_MESSAGE - if (sync_mode_) { - VLOG(3) << "sync: recv fetch barrier message"; - rpc_server_->IncreaseBatchBarrier(kRequestGet); } - return true; } diff --git a/paddle/fluid/operators/distributed/rpc_client.cc b/paddle/fluid/operators/distributed/rpc_client.cc index b5ec9fe536..382b65d637 100644 --- a/paddle/fluid/operators/distributed/rpc_client.cc +++ b/paddle/fluid/operators/distributed/rpc_client.cc @@ -16,7 +16,7 @@ #include "gflags/gflags.h" // default to 3min to avoid temprary network failures. -DEFINE_int32(rpc_deadline, 180000, "deadline timeouts for rpc"); +DEFINE_int32(rpc_deadline, 30000, "deadline timeouts for rpc"); namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/distributed/rpc_client.h b/paddle/fluid/operators/distributed/rpc_client.h index 37783b78ec..6479d3a97b 100644 --- a/paddle/fluid/operators/distributed/rpc_client.h +++ b/paddle/fluid/operators/distributed/rpc_client.h @@ -60,10 +60,17 @@ class RPCClient { const std::string& dir, int64_t time_out = FLAGS_rpc_deadline) = 0; - // SendComplete tells all the server that current trainer have no more data - // to train, so that the pserver can reduce it's barrier count, and continue - // to train with other trainers. - virtual void SendComplete() = 0; + virtual void AsyncSendBeginPass(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual void AsyncSendEndPass(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + // BeginePass/EndPass tells all the pserver that start/end a pass, so that + // the pserver can increase/reduce it's barrier count, and continue to train + // with other trainers. 
+ virtual void SendBeginPass() = 0; + virtual void SendEndPass() = 0; virtual void Wait() = 0; diff --git a/paddle/fluid/operators/distributed/rpc_server.cc b/paddle/fluid/operators/distributed/rpc_server.cc index c0520e248d..5f4c134837 100644 --- a/paddle/fluid/operators/distributed/rpc_server.cc +++ b/paddle/fluid/operators/distributed/rpc_server.cc @@ -44,7 +44,8 @@ void RPCServer::SavePort() const { void RPCServer::WaitBarrier(const std::string& rpc_name) { std::unique_lock lock(this->mutex_); barrier_cond_.wait(lock, [this, &rpc_name] { - return (barrier_counter_[rpc_name] >= client_num_ || exit_flag_.load()); + return ((barrier_counter_[rpc_name] == client_num_ && client_num_ != 0) || + exit_flag_.load()); }); VLOG(3) << "batch_barrier_: " << rpc_name << " " @@ -63,10 +64,25 @@ void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) { } } -void RPCServer::DecreaseClientNum() { +void RPCServer::BeginPass() { + VLOG(4) << "RPCServer begin increase pass barrier"; { - std::unique_lock lock(mutex_); + std::unique_lock locl(mutex_); + client_num_++; + VLOG(4) << "increase client_num to: " << client_num_; + } + barrier_cond_.notify_all(); +} + +void RPCServer::EndPass() { + VLOG(4) << "RPCServer begin increase pass barrier"; + { + std::unique_lock locl(mutex_); client_num_--; + VLOG(4) << "decrease client_num to: " << client_num_; + if (cur_cond_.load() == rpc_cond_map_[kRequestGet]) { + barrier_counter_[kRequestGet]--; + } } barrier_cond_.notify_all(); } diff --git a/paddle/fluid/operators/distributed/rpc_server.h b/paddle/fluid/operators/distributed/rpc_server.h index cf25e78435..833991c8aa 100644 --- a/paddle/fluid/operators/distributed/rpc_server.h +++ b/paddle/fluid/operators/distributed/rpc_server.h @@ -43,6 +43,9 @@ class RPCServer { bool IsExit() { return exit_flag_.load(); } int GetSelectedPort() const { return selected_port_; } + + int GetClientNum() const; + void SavePort() const; // RegisterRPC, register the rpc method name to a handler @@ -60,7 +63,10 @@ class RPCServer { void SetCond(const std::string& rpc_name); void WaitCond(const std::string& rpc_name); void IncreaseBatchBarrier(const std::string rpc_name); - void DecreaseClientNum(); + + void BeginPass(); + void EndPass(); + void ResetBarrierCounter(); protected: diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 36d0809968..3f1e2ceedb 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -493,7 +493,8 @@ All parameter, weight, gradient are variables in Paddle. 
py::class_(m, "Executor") .def(py::init()) #ifdef PADDLE_WITH_DISTRIBUTE - .def("complete", &Executor::Complete) + .def("begin_pass", &Executor::BeginPass) + .def("end_pass", &Executor::EndPass) #endif .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope, int block_id, bool create_local_scope, bool create_vars) { diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 145f1423e4..b436dfe70a 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -348,6 +348,12 @@ class Executor(object): ] return outs + def begin_pass(self): + self.executor.begin_pass() + + def end_pass(self): + self.executor.end_pass() + def run(self, program=None, feed=None, From 029425a5f46cdf0643deace411f1681fb0f37b8b Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Sun, 1 Jul 2018 23:12:53 +0800 Subject: [PATCH 10/34] update --- paddle/fluid/operators/distributed/grpc_client.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc index d8dc667fe7..5d2e368879 100644 --- a/paddle/fluid/operators/distributed/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -37,7 +37,7 @@ void GRPCClient::InitEventLoop() { void GRPCClient::SendBeginPass() { for (auto& it : channels_) { - VLOG(3) << "send begin pass to: " it.first; + VLOG(3) << "send begin pass to: " << it.first; this->AsyncSendBeginPass(it.first); } } From 8c1326c5fefd5c429670a215695d3df682bba7b2 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Sun, 1 Jul 2018 17:24:09 +0800 Subject: [PATCH 11/34] move v2 api and capi to legacy --- paddle/CMakeLists.txt | 6 +++--- paddle/{ => legacy}/api/Arguments.cpp | 0 paddle/{ => legacy}/api/CMakeLists.txt | 0 paddle/{ => legacy}/api/ConfigParser.cpp | 0 paddle/{ => legacy}/api/Evaluator.cpp | 0 paddle/{ => legacy}/api/GradientMachine.cpp | 0 paddle/{ => legacy}/api/Internal.h | 0 paddle/{ => legacy}/api/Matrix.cpp | 0 paddle/{ => legacy}/api/Paddle.i | 0 paddle/{ => legacy}/api/PaddleAPI.h | 0 paddle/{ => legacy}/api/PaddleAPIPrivate.h | 0 paddle/{ => legacy}/api/Parameter.cpp | 0 paddle/{ => legacy}/api/ParameterOptimizer.cpp | 0 paddle/{ => legacy}/api/ParameterUpdater.cpp | 0 paddle/{ => legacy}/api/SequenceGenerator.cpp | 0 paddle/{ => legacy}/api/Trainer.cpp | 0 paddle/{ => legacy}/api/Util.cpp | 0 paddle/{ => legacy}/api/Vector.cpp | 0 paddle/{ => legacy}/api/__init__.py | 0 paddle/{ => legacy}/api/numpy.i | 0 paddle/{ => legacy}/api/test/.gitignore | 0 paddle/{ => legacy}/api/test/CMakeLists.txt | 0 paddle/{ => legacy}/api/test/testArguments.py | 0 paddle/{ => legacy}/api/test/testGradientMachine.py | 0 paddle/{ => legacy}/api/test/testMatrix.py | 0 paddle/{ => legacy}/api/test/testTrain.py | 0 paddle/{ => legacy}/api/test/testTrainConfig.py | 0 paddle/{ => legacy}/api/test/testTrainer.py | 0 paddle/{ => legacy}/api/test/testVector.py | 0 paddle/{ => legacy}/api/test/util.py | 0 paddle/{ => legacy}/capi/Arguments.cpp | 0 paddle/{ => legacy}/capi/CMakeLists.txt | 0 paddle/{ => legacy}/capi/Main.cpp | 0 paddle/{ => legacy}/capi/Matrix.cpp | 0 paddle/{ => legacy}/capi/Vector.cpp | 0 paddle/{ => legacy}/capi/arguments.h | 0 paddle/{ => legacy}/capi/capi.h | 0 paddle/{ => legacy}/capi/capi_private.h | 0 paddle/{ => legacy}/capi/config.h.in | 0 paddle/{ => legacy}/capi/error.cpp | 0 paddle/{ => legacy}/capi/error.h | 0 paddle/{ => legacy}/capi/examples/.gitignore | 0 paddle/{ => legacy}/capi/examples/README.md | 0 paddle/{ => 
legacy}/capi/examples/model_inference/README.md | 0 .../capi/examples/model_inference/common/common.h | 0 .../capi/examples/model_inference/dense/CMakeLists.txt | 0 .../capi/examples/model_inference/dense/convert_protobin.sh | 0 .../{ => legacy}/capi/examples/model_inference/dense/main.c | 0 .../capi/examples/model_inference/dense/merge_v2_model.py | 0 .../capi/examples/model_inference/dense/mnist_v2.py | 0 .../capi/examples/model_inference/dense/trainer_config.py | 0 .../capi/examples/model_inference/multi_thread/.gitignore | 0 .../examples/model_inference/multi_thread/CMakeLists.txt | 0 .../model_inference/multi_thread/convert_protobin.sh | 0 .../capi/examples/model_inference/multi_thread/main.c | 0 .../capi/examples/model_inference/multi_thread/main_gpu.c | 0 .../examples/model_inference/multi_thread/trainer_config.py | 0 .../capi/examples/model_inference/sequence/.gitignore | 0 .../capi/examples/model_inference/sequence/CMakeLists.txt | 0 .../examples/model_inference/sequence/convert_protobin.sh | 0 .../capi/examples/model_inference/sequence/main.c | 0 .../examples/model_inference/sequence/trainer_config.py | 0 .../capi/examples/model_inference/sparse_binary/.gitignore | 0 .../examples/model_inference/sparse_binary/CMakeLists.txt | 0 .../model_inference/sparse_binary/convert_protobin.sh | 0 .../capi/examples/model_inference/sparse_binary/main.c | 0 .../model_inference/sparse_binary/trainer_config.py | 0 paddle/{ => legacy}/capi/gradient_machine.cpp | 0 paddle/{ => legacy}/capi/gradient_machine.h | 0 paddle/{ => legacy}/capi/main.h | 0 paddle/{ => legacy}/capi/matrix.h | 0 paddle/{ => legacy}/capi/paddle_capi.map | 0 paddle/{ => legacy}/capi/tests/.gitignore | 0 paddle/{ => legacy}/capi/tests/CMakeLists.txt | 0 paddle/{ => legacy}/capi/tests/test_Arguments.cpp | 0 paddle/{ => legacy}/capi/tests/test_GradientMachine.cpp | 0 paddle/{ => legacy}/capi/tests/test_Matrix.cpp | 0 paddle/{ => legacy}/capi/tests/test_Vector.cpp | 0 paddle/{ => legacy}/capi/tests/test_predict_network.py | 0 paddle/{ => legacy}/capi/vector.h | 0 80 files changed, 3 insertions(+), 3 deletions(-) rename paddle/{ => legacy}/api/Arguments.cpp (100%) rename paddle/{ => legacy}/api/CMakeLists.txt (100%) rename paddle/{ => legacy}/api/ConfigParser.cpp (100%) rename paddle/{ => legacy}/api/Evaluator.cpp (100%) rename paddle/{ => legacy}/api/GradientMachine.cpp (100%) rename paddle/{ => legacy}/api/Internal.h (100%) rename paddle/{ => legacy}/api/Matrix.cpp (100%) rename paddle/{ => legacy}/api/Paddle.i (100%) rename paddle/{ => legacy}/api/PaddleAPI.h (100%) rename paddle/{ => legacy}/api/PaddleAPIPrivate.h (100%) rename paddle/{ => legacy}/api/Parameter.cpp (100%) rename paddle/{ => legacy}/api/ParameterOptimizer.cpp (100%) rename paddle/{ => legacy}/api/ParameterUpdater.cpp (100%) rename paddle/{ => legacy}/api/SequenceGenerator.cpp (100%) rename paddle/{ => legacy}/api/Trainer.cpp (100%) rename paddle/{ => legacy}/api/Util.cpp (100%) rename paddle/{ => legacy}/api/Vector.cpp (100%) rename paddle/{ => legacy}/api/__init__.py (100%) rename paddle/{ => legacy}/api/numpy.i (100%) rename paddle/{ => legacy}/api/test/.gitignore (100%) rename paddle/{ => legacy}/api/test/CMakeLists.txt (100%) rename paddle/{ => legacy}/api/test/testArguments.py (100%) rename paddle/{ => legacy}/api/test/testGradientMachine.py (100%) rename paddle/{ => legacy}/api/test/testMatrix.py (100%) rename paddle/{ => legacy}/api/test/testTrain.py (100%) rename paddle/{ => legacy}/api/test/testTrainConfig.py (100%) rename paddle/{ => 
legacy}/api/test/testTrainer.py (100%) rename paddle/{ => legacy}/api/test/testVector.py (100%) rename paddle/{ => legacy}/api/test/util.py (100%) rename paddle/{ => legacy}/capi/Arguments.cpp (100%) rename paddle/{ => legacy}/capi/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/Main.cpp (100%) rename paddle/{ => legacy}/capi/Matrix.cpp (100%) rename paddle/{ => legacy}/capi/Vector.cpp (100%) rename paddle/{ => legacy}/capi/arguments.h (100%) rename paddle/{ => legacy}/capi/capi.h (100%) rename paddle/{ => legacy}/capi/capi_private.h (100%) rename paddle/{ => legacy}/capi/config.h.in (100%) rename paddle/{ => legacy}/capi/error.cpp (100%) rename paddle/{ => legacy}/capi/error.h (100%) rename paddle/{ => legacy}/capi/examples/.gitignore (100%) rename paddle/{ => legacy}/capi/examples/README.md (100%) rename paddle/{ => legacy}/capi/examples/model_inference/README.md (100%) rename paddle/{ => legacy}/capi/examples/model_inference/common/common.h (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/convert_protobin.sh (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/main.c (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/merge_v2_model.py (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/mnist_v2.py (100%) rename paddle/{ => legacy}/capi/examples/model_inference/dense/trainer_config.py (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/.gitignore (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/convert_protobin.sh (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/main.c (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/main_gpu.c (100%) rename paddle/{ => legacy}/capi/examples/model_inference/multi_thread/trainer_config.py (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sequence/.gitignore (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sequence/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sequence/convert_protobin.sh (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sequence/main.c (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sequence/trainer_config.py (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sparse_binary/.gitignore (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sparse_binary/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sparse_binary/convert_protobin.sh (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sparse_binary/main.c (100%) rename paddle/{ => legacy}/capi/examples/model_inference/sparse_binary/trainer_config.py (100%) rename paddle/{ => legacy}/capi/gradient_machine.cpp (100%) rename paddle/{ => legacy}/capi/gradient_machine.h (100%) rename paddle/{ => legacy}/capi/main.h (100%) rename paddle/{ => legacy}/capi/matrix.h (100%) rename paddle/{ => legacy}/capi/paddle_capi.map (100%) rename paddle/{ => legacy}/capi/tests/.gitignore (100%) rename paddle/{ => legacy}/capi/tests/CMakeLists.txt (100%) rename paddle/{ => legacy}/capi/tests/test_Arguments.cpp (100%) rename paddle/{ => legacy}/capi/tests/test_GradientMachine.cpp (100%) rename paddle/{ => legacy}/capi/tests/test_Matrix.cpp (100%) rename paddle/{ => legacy}/capi/tests/test_Vector.cpp 
(100%) rename paddle/{ => legacy}/capi/tests/test_predict_network.py (100%) rename paddle/{ => legacy}/capi/vector.h (100%) diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index efa59fc4a5..7a4bd9183a 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -7,18 +7,18 @@ if(NOT WITH_FLUID_ONLY) add_subdirectory(legacy/parameter) if(MOBILE_INFERENCE) - add_subdirectory(capi) + add_subdirectory(legacy/capi) else() add_subdirectory(legacy/pserver) add_subdirectory(trainer) add_subdirectory(scripts) if(WITH_C_API) - add_subdirectory(capi) + add_subdirectory(legacy/capi) endif() if(WITH_SWIG_PY) - add_subdirectory(api) + add_subdirectory(legacy/api) endif() endif() endif() diff --git a/paddle/api/Arguments.cpp b/paddle/legacy/api/Arguments.cpp similarity index 100% rename from paddle/api/Arguments.cpp rename to paddle/legacy/api/Arguments.cpp diff --git a/paddle/api/CMakeLists.txt b/paddle/legacy/api/CMakeLists.txt similarity index 100% rename from paddle/api/CMakeLists.txt rename to paddle/legacy/api/CMakeLists.txt diff --git a/paddle/api/ConfigParser.cpp b/paddle/legacy/api/ConfigParser.cpp similarity index 100% rename from paddle/api/ConfigParser.cpp rename to paddle/legacy/api/ConfigParser.cpp diff --git a/paddle/api/Evaluator.cpp b/paddle/legacy/api/Evaluator.cpp similarity index 100% rename from paddle/api/Evaluator.cpp rename to paddle/legacy/api/Evaluator.cpp diff --git a/paddle/api/GradientMachine.cpp b/paddle/legacy/api/GradientMachine.cpp similarity index 100% rename from paddle/api/GradientMachine.cpp rename to paddle/legacy/api/GradientMachine.cpp diff --git a/paddle/api/Internal.h b/paddle/legacy/api/Internal.h similarity index 100% rename from paddle/api/Internal.h rename to paddle/legacy/api/Internal.h diff --git a/paddle/api/Matrix.cpp b/paddle/legacy/api/Matrix.cpp similarity index 100% rename from paddle/api/Matrix.cpp rename to paddle/legacy/api/Matrix.cpp diff --git a/paddle/api/Paddle.i b/paddle/legacy/api/Paddle.i similarity index 100% rename from paddle/api/Paddle.i rename to paddle/legacy/api/Paddle.i diff --git a/paddle/api/PaddleAPI.h b/paddle/legacy/api/PaddleAPI.h similarity index 100% rename from paddle/api/PaddleAPI.h rename to paddle/legacy/api/PaddleAPI.h diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/legacy/api/PaddleAPIPrivate.h similarity index 100% rename from paddle/api/PaddleAPIPrivate.h rename to paddle/legacy/api/PaddleAPIPrivate.h diff --git a/paddle/api/Parameter.cpp b/paddle/legacy/api/Parameter.cpp similarity index 100% rename from paddle/api/Parameter.cpp rename to paddle/legacy/api/Parameter.cpp diff --git a/paddle/api/ParameterOptimizer.cpp b/paddle/legacy/api/ParameterOptimizer.cpp similarity index 100% rename from paddle/api/ParameterOptimizer.cpp rename to paddle/legacy/api/ParameterOptimizer.cpp diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/legacy/api/ParameterUpdater.cpp similarity index 100% rename from paddle/api/ParameterUpdater.cpp rename to paddle/legacy/api/ParameterUpdater.cpp diff --git a/paddle/api/SequenceGenerator.cpp b/paddle/legacy/api/SequenceGenerator.cpp similarity index 100% rename from paddle/api/SequenceGenerator.cpp rename to paddle/legacy/api/SequenceGenerator.cpp diff --git a/paddle/api/Trainer.cpp b/paddle/legacy/api/Trainer.cpp similarity index 100% rename from paddle/api/Trainer.cpp rename to paddle/legacy/api/Trainer.cpp diff --git a/paddle/api/Util.cpp b/paddle/legacy/api/Util.cpp similarity index 100% rename from paddle/api/Util.cpp rename to paddle/legacy/api/Util.cpp diff --git 
a/paddle/api/Vector.cpp b/paddle/legacy/api/Vector.cpp similarity index 100% rename from paddle/api/Vector.cpp rename to paddle/legacy/api/Vector.cpp diff --git a/paddle/api/__init__.py b/paddle/legacy/api/__init__.py similarity index 100% rename from paddle/api/__init__.py rename to paddle/legacy/api/__init__.py diff --git a/paddle/api/numpy.i b/paddle/legacy/api/numpy.i similarity index 100% rename from paddle/api/numpy.i rename to paddle/legacy/api/numpy.i diff --git a/paddle/api/test/.gitignore b/paddle/legacy/api/test/.gitignore similarity index 100% rename from paddle/api/test/.gitignore rename to paddle/legacy/api/test/.gitignore diff --git a/paddle/api/test/CMakeLists.txt b/paddle/legacy/api/test/CMakeLists.txt similarity index 100% rename from paddle/api/test/CMakeLists.txt rename to paddle/legacy/api/test/CMakeLists.txt diff --git a/paddle/api/test/testArguments.py b/paddle/legacy/api/test/testArguments.py similarity index 100% rename from paddle/api/test/testArguments.py rename to paddle/legacy/api/test/testArguments.py diff --git a/paddle/api/test/testGradientMachine.py b/paddle/legacy/api/test/testGradientMachine.py similarity index 100% rename from paddle/api/test/testGradientMachine.py rename to paddle/legacy/api/test/testGradientMachine.py diff --git a/paddle/api/test/testMatrix.py b/paddle/legacy/api/test/testMatrix.py similarity index 100% rename from paddle/api/test/testMatrix.py rename to paddle/legacy/api/test/testMatrix.py diff --git a/paddle/api/test/testTrain.py b/paddle/legacy/api/test/testTrain.py similarity index 100% rename from paddle/api/test/testTrain.py rename to paddle/legacy/api/test/testTrain.py diff --git a/paddle/api/test/testTrainConfig.py b/paddle/legacy/api/test/testTrainConfig.py similarity index 100% rename from paddle/api/test/testTrainConfig.py rename to paddle/legacy/api/test/testTrainConfig.py diff --git a/paddle/api/test/testTrainer.py b/paddle/legacy/api/test/testTrainer.py similarity index 100% rename from paddle/api/test/testTrainer.py rename to paddle/legacy/api/test/testTrainer.py diff --git a/paddle/api/test/testVector.py b/paddle/legacy/api/test/testVector.py similarity index 100% rename from paddle/api/test/testVector.py rename to paddle/legacy/api/test/testVector.py diff --git a/paddle/api/test/util.py b/paddle/legacy/api/test/util.py similarity index 100% rename from paddle/api/test/util.py rename to paddle/legacy/api/test/util.py diff --git a/paddle/capi/Arguments.cpp b/paddle/legacy/capi/Arguments.cpp similarity index 100% rename from paddle/capi/Arguments.cpp rename to paddle/legacy/capi/Arguments.cpp diff --git a/paddle/capi/CMakeLists.txt b/paddle/legacy/capi/CMakeLists.txt similarity index 100% rename from paddle/capi/CMakeLists.txt rename to paddle/legacy/capi/CMakeLists.txt diff --git a/paddle/capi/Main.cpp b/paddle/legacy/capi/Main.cpp similarity index 100% rename from paddle/capi/Main.cpp rename to paddle/legacy/capi/Main.cpp diff --git a/paddle/capi/Matrix.cpp b/paddle/legacy/capi/Matrix.cpp similarity index 100% rename from paddle/capi/Matrix.cpp rename to paddle/legacy/capi/Matrix.cpp diff --git a/paddle/capi/Vector.cpp b/paddle/legacy/capi/Vector.cpp similarity index 100% rename from paddle/capi/Vector.cpp rename to paddle/legacy/capi/Vector.cpp diff --git a/paddle/capi/arguments.h b/paddle/legacy/capi/arguments.h similarity index 100% rename from paddle/capi/arguments.h rename to paddle/legacy/capi/arguments.h diff --git a/paddle/capi/capi.h b/paddle/legacy/capi/capi.h similarity index 100% rename from 
paddle/capi/capi.h rename to paddle/legacy/capi/capi.h diff --git a/paddle/capi/capi_private.h b/paddle/legacy/capi/capi_private.h similarity index 100% rename from paddle/capi/capi_private.h rename to paddle/legacy/capi/capi_private.h diff --git a/paddle/capi/config.h.in b/paddle/legacy/capi/config.h.in similarity index 100% rename from paddle/capi/config.h.in rename to paddle/legacy/capi/config.h.in diff --git a/paddle/capi/error.cpp b/paddle/legacy/capi/error.cpp similarity index 100% rename from paddle/capi/error.cpp rename to paddle/legacy/capi/error.cpp diff --git a/paddle/capi/error.h b/paddle/legacy/capi/error.h similarity index 100% rename from paddle/capi/error.h rename to paddle/legacy/capi/error.h diff --git a/paddle/capi/examples/.gitignore b/paddle/legacy/capi/examples/.gitignore similarity index 100% rename from paddle/capi/examples/.gitignore rename to paddle/legacy/capi/examples/.gitignore diff --git a/paddle/capi/examples/README.md b/paddle/legacy/capi/examples/README.md similarity index 100% rename from paddle/capi/examples/README.md rename to paddle/legacy/capi/examples/README.md diff --git a/paddle/capi/examples/model_inference/README.md b/paddle/legacy/capi/examples/model_inference/README.md similarity index 100% rename from paddle/capi/examples/model_inference/README.md rename to paddle/legacy/capi/examples/model_inference/README.md diff --git a/paddle/capi/examples/model_inference/common/common.h b/paddle/legacy/capi/examples/model_inference/common/common.h similarity index 100% rename from paddle/capi/examples/model_inference/common/common.h rename to paddle/legacy/capi/examples/model_inference/common/common.h diff --git a/paddle/capi/examples/model_inference/dense/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/dense/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/dense/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/dense/CMakeLists.txt diff --git a/paddle/capi/examples/model_inference/dense/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/dense/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/model_inference/dense/convert_protobin.sh rename to paddle/legacy/capi/examples/model_inference/dense/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/legacy/capi/examples/model_inference/dense/main.c similarity index 100% rename from paddle/capi/examples/model_inference/dense/main.c rename to paddle/legacy/capi/examples/model_inference/dense/main.c diff --git a/paddle/capi/examples/model_inference/dense/merge_v2_model.py b/paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py similarity index 100% rename from paddle/capi/examples/model_inference/dense/merge_v2_model.py rename to paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py diff --git a/paddle/capi/examples/model_inference/dense/mnist_v2.py b/paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py similarity index 100% rename from paddle/capi/examples/model_inference/dense/mnist_v2.py rename to paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py diff --git a/paddle/capi/examples/model_inference/dense/trainer_config.py b/paddle/legacy/capi/examples/model_inference/dense/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/dense/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/dense/trainer_config.py diff --git 
a/paddle/capi/examples/model_inference/multi_thread/.gitignore b/paddle/legacy/capi/examples/model_inference/multi_thread/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/.gitignore rename to paddle/legacy/capi/examples/model_inference/multi_thread/.gitignore diff --git a/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/multi_thread/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/multi_thread/CMakeLists.txt diff --git a/paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/multi_thread/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh rename to paddle/legacy/capi/examples/model_inference/multi_thread/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/multi_thread/main.c b/paddle/legacy/capi/examples/model_inference/multi_thread/main.c similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/main.c rename to paddle/legacy/capi/examples/model_inference/multi_thread/main.c diff --git a/paddle/capi/examples/model_inference/multi_thread/main_gpu.c b/paddle/legacy/capi/examples/model_inference/multi_thread/main_gpu.c similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/main_gpu.c rename to paddle/legacy/capi/examples/model_inference/multi_thread/main_gpu.c diff --git a/paddle/capi/examples/model_inference/multi_thread/trainer_config.py b/paddle/legacy/capi/examples/model_inference/multi_thread/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/multi_thread/trainer_config.py diff --git a/paddle/capi/examples/model_inference/sequence/.gitignore b/paddle/legacy/capi/examples/model_inference/sequence/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/sequence/.gitignore rename to paddle/legacy/capi/examples/model_inference/sequence/.gitignore diff --git a/paddle/capi/examples/model_inference/sequence/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/sequence/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/sequence/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/sequence/CMakeLists.txt diff --git a/paddle/capi/examples/model_inference/sequence/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/sequence/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/model_inference/sequence/convert_protobin.sh rename to paddle/legacy/capi/examples/model_inference/sequence/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/sequence/main.c b/paddle/legacy/capi/examples/model_inference/sequence/main.c similarity index 100% rename from paddle/capi/examples/model_inference/sequence/main.c rename to paddle/legacy/capi/examples/model_inference/sequence/main.c diff --git a/paddle/capi/examples/model_inference/sequence/trainer_config.py b/paddle/legacy/capi/examples/model_inference/sequence/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/sequence/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/sequence/trainer_config.py diff --git 
a/paddle/capi/examples/model_inference/sparse_binary/.gitignore b/paddle/legacy/capi/examples/model_inference/sparse_binary/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/.gitignore rename to paddle/legacy/capi/examples/model_inference/sparse_binary/.gitignore diff --git a/paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/sparse_binary/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/sparse_binary/CMakeLists.txt diff --git a/paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/sparse_binary/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh rename to paddle/legacy/capi/examples/model_inference/sparse_binary/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/legacy/capi/examples/model_inference/sparse_binary/main.c similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/main.c rename to paddle/legacy/capi/examples/model_inference/sparse_binary/main.c diff --git a/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py b/paddle/legacy/capi/examples/model_inference/sparse_binary/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/sparse_binary/trainer_config.py diff --git a/paddle/capi/gradient_machine.cpp b/paddle/legacy/capi/gradient_machine.cpp similarity index 100% rename from paddle/capi/gradient_machine.cpp rename to paddle/legacy/capi/gradient_machine.cpp diff --git a/paddle/capi/gradient_machine.h b/paddle/legacy/capi/gradient_machine.h similarity index 100% rename from paddle/capi/gradient_machine.h rename to paddle/legacy/capi/gradient_machine.h diff --git a/paddle/capi/main.h b/paddle/legacy/capi/main.h similarity index 100% rename from paddle/capi/main.h rename to paddle/legacy/capi/main.h diff --git a/paddle/capi/matrix.h b/paddle/legacy/capi/matrix.h similarity index 100% rename from paddle/capi/matrix.h rename to paddle/legacy/capi/matrix.h diff --git a/paddle/capi/paddle_capi.map b/paddle/legacy/capi/paddle_capi.map similarity index 100% rename from paddle/capi/paddle_capi.map rename to paddle/legacy/capi/paddle_capi.map diff --git a/paddle/capi/tests/.gitignore b/paddle/legacy/capi/tests/.gitignore similarity index 100% rename from paddle/capi/tests/.gitignore rename to paddle/legacy/capi/tests/.gitignore diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/legacy/capi/tests/CMakeLists.txt similarity index 100% rename from paddle/capi/tests/CMakeLists.txt rename to paddle/legacy/capi/tests/CMakeLists.txt diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/legacy/capi/tests/test_Arguments.cpp similarity index 100% rename from paddle/capi/tests/test_Arguments.cpp rename to paddle/legacy/capi/tests/test_Arguments.cpp diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/legacy/capi/tests/test_GradientMachine.cpp similarity index 100% rename from paddle/capi/tests/test_GradientMachine.cpp rename to paddle/legacy/capi/tests/test_GradientMachine.cpp diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/legacy/capi/tests/test_Matrix.cpp similarity index 100% rename from 
paddle/capi/tests/test_Matrix.cpp rename to paddle/legacy/capi/tests/test_Matrix.cpp diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/legacy/capi/tests/test_Vector.cpp similarity index 100% rename from paddle/capi/tests/test_Vector.cpp rename to paddle/legacy/capi/tests/test_Vector.cpp diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/legacy/capi/tests/test_predict_network.py similarity index 100% rename from paddle/capi/tests/test_predict_network.py rename to paddle/legacy/capi/tests/test_predict_network.py diff --git a/paddle/capi/vector.h b/paddle/legacy/capi/vector.h similarity index 100% rename from paddle/capi/vector.h rename to paddle/legacy/capi/vector.h From 96c6e55169154466d8569695d47f4f5a985134c3 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Sun, 1 Jul 2018 18:27:51 +0800 Subject: [PATCH 12/34] fix --- paddle/legacy/api/Paddle.i | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/legacy/api/Paddle.i b/paddle/legacy/api/Paddle.i index 3237e73745..e6165fb106 100644 --- a/paddle/legacy/api/Paddle.i +++ b/paddle/legacy/api/Paddle.i @@ -2,7 +2,7 @@ %include "std_string.i" %{ #define SWIG_FILE_WITH_INIT -#include "api/PaddleAPI.h" +#include "legacy/api/PaddleAPI.h" %} %include "exception.i" @@ -199,4 +199,4 @@ namespace std { %ignore OptimizationConfigPrivate; %ignore ParameterTraverseCallbackPrivate; %include "utils/GlobalConstants.h" -%include "api/PaddleAPI.h" +%include "legacy/api/PaddleAPI.h" From ff4317cee9bbec749fa41e2fdcfbe84cefbbba2b Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 2 Jul 2018 11:21:37 +0800 Subject: [PATCH 13/34] follow comments --- paddle/fluid/framework/details/build_strategy.h | 2 ++ paddle/fluid/framework/details/data_balance_op_handle.cc | 8 ++++++-- .../framework/details/multi_devices_graph_builder.cc | 2 +- paddle/fluid/framework/details/op_handle_base.cc | 1 + paddle/fluid/pybind/pybind.cc | 6 +++++- python/paddle/fluid/tests/unittests/.gitignore | 2 ++ 6 files changed, 17 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h index 64e83acb4d..9c2c845c6e 100644 --- a/paddle/fluid/framework/details/build_strategy.h +++ b/paddle/fluid/framework/details/build_strategy.h @@ -33,6 +33,8 @@ struct BuildStrategy { GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice}; std::string debug_graphviz_path_{""}; + + bool enable_data_balance_{true}; }; } // namespace details diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index f8d431ef2a..b914851fe0 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -73,7 +73,9 @@ std::vector> DataBalanceOpHandle::GetBalancePlan( for (int dst_idx = device_num - empty_num; dst_idx < device_num; ++dst_idx) { if (size_device_vec[src_idx][0] <= expected_device_size) { ++src_idx; - PADDLE_ENFORCE_LT(src_idx, device_num - empty_num); + PADDLE_ENFORCE_LT( + src_idx, device_num - empty_num, + "In current srategy an empty tensor should not be copy source."); } size_device_vec[src_idx][0] -= expected_device_size; size_device_vec[dst_idx][0] += expected_device_size; @@ -113,7 +115,9 @@ void DataBalanceOpHandle::RunImpl() { if (data_idx == 0) { device_sizes.emplace_back(ins_size); } else { - PADDLE_ENFORCE_EQ(ins_size, device_sizes.at(place_idx)); + PADDLE_ENFORCE_EQ( + ins_size, device_sizes.at(place_idx), + "All data on 
the same device shall have the same batch size."); } } const auto &balance_plan = GetBalancePlan(device_sizes); diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index edfefb8231..46d0c2769c 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -216,7 +216,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( } else { // This op runs on all devices, and its output may have parameter's // gradients. - if (op->Type() == "read") { + if (op->Type() == "read" && strategy_.enable_data_balance_) { op->SetAttr("throw_eof_exp", false); CreateComputationalOps(&result, *op, places_.size()); const auto &data_var_names = op->Output("Out"); diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 856124875d..3560fabb42 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -58,6 +58,7 @@ void OpHandleBase::Run(bool use_cuda) { void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { #ifdef PADDLE_WITH_CUDA + PADDLE_ENFORCE_NOT_NULL(waited_ctx); if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) { for (auto &dev_ctx : dev_ctxes_) { PADDLE_ENFORCE_NOT_NULL(dev_ctx.second); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 36d0809968..9fc647a7d2 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -643,7 +643,11 @@ All parameter, weight, gradient are variables in Paddle. [](const BuildStrategy &self) { return self.debug_graphviz_path_; }, [](BuildStrategy &self, const std::string &path) { self.debug_graphviz_path_ = path; - }); + }) + .def_property( + "enable_data_balance", + [](const BuildStrategy &self) { return self.enable_data_balance_; }, + [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; }); pe.def(py::init &, const std::unordered_set &, diff --git a/python/paddle/fluid/tests/unittests/.gitignore b/python/paddle/fluid/tests/unittests/.gitignore index 3538a9c200..b1e8fda03a 100644 --- a/python/paddle/fluid/tests/unittests/.gitignore +++ b/python/paddle/fluid/tests/unittests/.gitignore @@ -4,3 +4,5 @@ mnist_1.recordio mnist_2.recordio flowers.recordio wmt16.recordio +data_balance_test.recordio +data_balance_with_lod_test.recordio From 37410a0c752cc87751bad8810971fa049f101d74 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 2 Jul 2018 14:46:09 +0800 Subject: [PATCH 14/34] update by comment --- paddle/fluid/framework/executor.cc | 16 ++++++---------- .../fluid/operators/distributed/grpc_client.cc | 2 ++ paddle/fluid/operators/distributed/rpc_server.cc | 4 ++-- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 3928750138..84f67fafa1 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -47,19 +47,15 @@ Executor::Executor(const platform::Place& place) : place_(place) {} #ifdef PADDLE_WITH_DISTRIBUTE void Executor::BeginPass() { - auto client = ::paddle::operators::distributed::RPCClient::GetInstance< - ::paddle::operators::distributed::GRPCClient>(); - - client->SendBeginPass(); - client->Wait(); + ::paddle::operators::distributed::RPCClient::GetInstance< + ::paddle::operators::distributed::GRPCClient>() + ->SendBeginPass(); } void Executor::EndPass() { - 
auto client = ::paddle::operators::distributed::RPCClient::GetInstance< - ::paddle::operators::distributed::GRPCClient>(); - - client->SendEndPass(); - client->Wait(); + ::paddle::operators::distributed::RPCClient::GetInstance< + ::paddle::operators::distributed::GRPCClient>() + ->SendEndPass(); } #endif diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc index 5d2e368879..4a09f3870d 100644 --- a/paddle/fluid/operators/distributed/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -40,6 +40,7 @@ void GRPCClient::SendBeginPass() { VLOG(3) << "send begin pass to: " << it.first; this->AsyncSendBeginPass(it.first); } + this->Wait(); } void GRPCClient::SendEndPass() { @@ -47,6 +48,7 @@ void GRPCClient::SendEndPass() { VLOG(3) << "send end pass to " << it.first; this->AsyncSendEndPass(it.first); } + this->Wait(); } GRPCClient::~GRPCClient() { diff --git a/paddle/fluid/operators/distributed/rpc_server.cc b/paddle/fluid/operators/distributed/rpc_server.cc index 5f4c134837..d49ee34eea 100644 --- a/paddle/fluid/operators/distributed/rpc_server.cc +++ b/paddle/fluid/operators/distributed/rpc_server.cc @@ -67,7 +67,7 @@ void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) { void RPCServer::BeginPass() { VLOG(4) << "RPCServer begin increase pass barrier"; { - std::unique_lock locl(mutex_); + std::unique_lock lock(mutex_); client_num_++; VLOG(4) << "increase client_num to: " << client_num_; } @@ -77,7 +77,7 @@ void RPCServer::BeginPass() { void RPCServer::EndPass() { VLOG(4) << "RPCServer begin increase pass barrier"; { - std::unique_lock locl(mutex_); + std::unique_lock lock(mutex_); client_num_--; VLOG(4) << "decrease client_num to: " << client_num_; if (cur_cond_.load() == rpc_cond_map_[kRequestGet]) { From 1ce478f100efe6e35e164e294bf8e6682b360fdd Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Mon, 2 Jul 2018 16:13:52 +0800 Subject: [PATCH 15/34] Polish reshape op --- paddle/fluid/framework/op_registry.h | 84 ++++++++++++++++--- paddle/fluid/operators/reshape_op.cc | 74 ++++++++++++++-- .../{reshape_op.cu => reshape_op.cu.cc} | 18 ++-- paddle/fluid/operators/reshape_op.h | 71 +++------------- 4 files changed, 157 insertions(+), 90 deletions(-) rename paddle/fluid/operators/{reshape_op.cu => reshape_op.cu.cc} (51%) diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 43ab227a94..f0278cc49c 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -76,6 +76,19 @@ class OpRegistry { template struct OpKernelRegistrarFunctor; +template +inline void RegisterKernelClass(const char* op_type, const char* library_type) { + std::string library(library_type); + std::string data_layout = "ANYLAYOUT"; + if (library == "MKLDNN") { + data_layout = "MKLDNNLAYOUT"; + } + OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(), + StringToDataLayout(data_layout), + StringToLibraryType(library_type)); + OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KernelType()); +} + template struct OpKernelRegistrarFunctor { using KERNEL_TYPE = @@ -83,16 +96,7 @@ struct OpKernelRegistrarFunctor { void operator()(const char* op_type, const char* library_type) const { using T = typename KERNEL_TYPE::ELEMENT_TYPE; - std::string library(library_type); - std::string data_layout = "ANYLAYOUT"; - if (library == "MKLDNN") { - data_layout = "MKLDNNLAYOUT"; - } - OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(), - 
StringToDataLayout(data_layout), - StringToLibraryType(library_type)); - OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE); - + RegisterKernelClass(op_type, library_type); constexpr auto size = std::tuple_size>::value; OpKernelRegistrarFunctor func; @@ -116,6 +120,47 @@ class OpKernelRegistrar : public Registrar { } }; +template +struct OpKernelRegistrarFunctorEx; + +template +class OpKernelRegistrarEx : public Registrar { + public: + explicit OpKernelRegistrarEx(const char* op_type, const char* library_type) { + OpKernelRegistrarFunctorEx + func; + func(op_type, library_type); + } +}; + +template +struct OpKernelRegistrarFunctorEx { + void operator()(const char* op_type, const char* library_type) const {} +}; + +template +struct OpKernelRegistrarFunctorEx { + using KERNEL_TYPE = + typename std::tuple_element>::type; + using T = + typename std::tuple_element>::type; + + void operator()(const char* op_type, const char* library_type) const { + RegisterKernelClass(op_type, library_type); + + constexpr auto size = + std::tuple_size>::value; + OpKernelRegistrarFunctorEx= size, I + 2, + DataTypeAndKernelType...> + func; + func(op_type, library_type); + } +}; + /** * check if MACRO is used in GLOBAL NAMESPACE. */ @@ -174,6 +219,25 @@ class OpKernelRegistrar : public Registrar { #define REGISTER_OP_CPU_KERNEL(op_type, ...) \ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__) +#define REGISTER_OP_KERNEL_EX(op_type, library_type, place_class, ...) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + __reg_op_kernel_##op_type##_##library_type##__, \ + "REGISTER_OP_KERNEL_EX must be called in global namespace"); \ + static ::paddle::framework::OpKernelRegistrarEx \ + __op_kernel_registrar_##op_type##_##library_type##__(#op_type, \ + #library_type); \ + int TouchOpKernelRegistrar_##op_type##_##library_type() { \ + __op_kernel_registrar_##op_type##_##library_type##__.Touch(); \ + return 0; \ + } + +#define REGISTER_OP_CUDA_KERNEL_EX(op_type, ...) \ + REGISTER_OP_KERNEL_EX(p_type, CUDA, ::paddle::platform::CUDAPlace, \ + __VA_ARGS__) + +#define REGISTER_OP_CPU_KERNEL_EX(op_type, ...) \ + REGISTER_OP_KERNEL_EX(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__) + /** * Macro to mark what Operator and Kernel * we will use and tell the compiler to diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 7f743f577f..ed07e6c2f7 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -107,19 +107,75 @@ class ReshapeGradOp : public framework::OperatorWithKernel { } }; +void ReshapeKernel::Compute(const framework::ExecutionContext &ctx) const { + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); + + auto *shape_tensor = ctx.HasInput("Shape") + ? ctx.Input("Shape") + : nullptr; + + framework::DDim out_dims = out->dims(); + + if (shape_tensor) { + auto *shape_data = shape_tensor->data(); + framework::Tensor cpu_shape_tensor; + if (platform::is_gpu_place(ctx.GetPlace())) { + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); + shape_data = cpu_shape_tensor.data(); + } + auto shape = + std::vector(shape_data, shape_data + shape_tensor->numel()); + out_dims = ReshapeOp::ValidateShape(shape, in->dims()); + } + if (!in->lod().empty()) { + PADDLE_ENFORCE_EQ(out_dims[0], in->dims()[0], + "Reshape operator cannot reshape an input sequence batch " + "into an output sequence batch that has a different " + "number of time steps. 
Please consider using " + "sequence_reshape op."); + } + + bool inplace = ctx.Attr("inplace"); + out->Resize(out_dims); + if (!inplace) { + out->mutable_data(ctx.GetPlace(), in->type()); + framework::TensorCopySync(*in, ctx.GetPlace(), out); + out->Resize(out_dims); + } else { + out->ShareDataWith(*in); + out->Resize(out_dims); + } +} +void ReshapeGradKernelBase::Compute( + const framework::ExecutionContext &ctx) const { + auto *d_out = ctx.Input(framework::GradVarName("Out")); + auto *d_x = ctx.Output(framework::GradVarName("X")); + + d_x->mutable_data(ctx.GetPlace(), d_out->type()); + bool inplace = ctx.Attr("inplace"); + + auto in_dims = d_x->dims(); + if (!inplace) { + framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); + ctx.device_context().Wait(); + d_x->Resize(in_dims); + } else { + d_x->ShareDataWith(*d_out); + d_x->Resize(in_dims); + } +} } // namespace operators } // namespace paddle namespace ops = paddle::operators; -using CPU = paddle::platform::CPUDeviceContext; REGISTER_OPERATOR(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp); -REGISTER_OP_CPU_KERNEL(reshape, ops::ReshapeKernel, - ops::ReshapeKernel, - ops::ReshapeKernel, - ops::ReshapeKernel); -REGISTER_OP_CPU_KERNEL(reshape_grad, ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel); +REGISTER_OP_CPU_KERNEL_EX(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, int64_t, + ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL(reshape_grad, ops::ReshapeGradKernel, + ops::ReshapeGradKernel, + ops::ReshapeGradKernel, + ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu.cc similarity index 51% rename from paddle/fluid/operators/reshape_op.cu rename to paddle/fluid/operators/reshape_op.cu.cc index c628c634e2..8a09321eef 100644 --- a/paddle/fluid/operators/reshape_op.cu +++ b/paddle/fluid/operators/reshape_op.cu.cc @@ -13,14 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/reshape_op.h" -using CUDA = paddle::platform::CUDADeviceContext; - -REGISTER_OP_CUDA_KERNEL(reshape, paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel); +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL_EX(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, int64_t, + ops::ReshapeKernel); REGISTER_OP_CUDA_KERNEL(reshape_grad, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel); + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel, + paddle::operators::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 3dd8c7c11e..c0b57d11d3 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -118,72 +118,21 @@ class ReshapeOp : public framework::OperatorWithKernel { } }; -template -class ReshapeKernel : public framework::OpKernel { +class ReshapeKernel : public framework::OpKernelBase { public: - void Compute(const framework::ExecutionContext &ctx) const { - auto *out = ctx.Output("Out"); - auto *in = ctx.Input("X"); - - auto *shape_tensor = ctx.HasInput("Shape") - ? ctx.Input("Shape") - : nullptr; - - framework::DDim out_dims = out->dims(); - - if (shape_tensor) { - auto *shape_data = shape_tensor->data(); - framework::Tensor cpu_shape_tensor; - if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); - shape_data = cpu_shape_tensor.data(); - } - auto shape = - std::vector(shape_data, shape_data + shape_tensor->numel()); - out_dims = ReshapeOp::ValidateShape(shape, in->dims()); - } - if (!in->lod().empty()) { - PADDLE_ENFORCE_EQ( - out_dims[0], in->dims()[0], - "Reshape operator cannot reshape an input sequence batch " - "into an output sequence batch that has a different " - "number of time steps. Please consider using " - "sequence_reshape op."); - } + void Compute(const framework::ExecutionContext &ctx) const final; +}; - bool inplace = ctx.Attr("inplace"); - out->Resize(out_dims); - if (!inplace) { - out->mutable_data(ctx.GetPlace()); - framework::TensorCopySync(*in, ctx.GetPlace(), out); - out->Resize(out_dims); - } else { - out->ShareDataWith(*in); - out->Resize(out_dims); - } - } +class ReshapeGradKernelBase : public framework::OpKernelBase { + public: + void Compute(const framework::ExecutionContext &ctx) const; }; -template -class ReshapeGradKernel : public framework::OpKernel { +template +class ReshapeGradKernel : public ReshapeGradKernelBase { public: - void Compute(const framework::ExecutionContext &ctx) const { - auto *d_out = ctx.Input(framework::GradVarName("Out")); - auto *d_x = ctx.Output(framework::GradVarName("X")); - - d_x->mutable_data(ctx.GetPlace()); - bool inplace = ctx.Attr("inplace"); - - auto in_dims = d_x->dims(); - if (!inplace) { - framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); - ctx.device_context().Wait(); - d_x->Resize(in_dims); - } else { - d_x->ShareDataWith(*d_out); - d_x->Resize(in_dims); - } - } + // Tell register element type. 
+ using ELEMENT_TYPE = T; }; } // namespace operators } // namespace paddle From 3b00ed81a996b8a1b89fe81aa6e4b1a95c65e9e5 Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Mon, 2 Jul 2018 16:19:52 +0800 Subject: [PATCH 16/34] Make Kernel registed as a function --- paddle/fluid/framework/op_registry.h | 5 ++++- paddle/fluid/framework/operator.cc | 2 +- paddle/fluid/framework/operator.h | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 43ab227a94..674159b732 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -91,7 +91,10 @@ struct OpKernelRegistrarFunctor { OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(), StringToDataLayout(data_layout), StringToLibraryType(library_type)); - OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE); + OperatorWithKernel::AllOpKernels()[op_type][key] = + [](const framework::ExecutionContext& ctx) { + KERNEL_TYPE().Compute(ctx); + }; constexpr auto size = std::tuple_size>::value; OpKernelRegistrarFunctor diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 71cd5a3908..3cf8e8696d 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -651,7 +651,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope, dev_ctx = pool.Get(expected_kernel_key.place_); } - kernel_iter->second->Compute(ExecutionContext(*this, exec_scope, *dev_ctx)); + kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx)); if (!transfered_inplace_vars.empty()) { // there is inplace variable has been transfered. diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 1550d5df17..01d750efbb 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -347,9 +347,9 @@ class OpKernel : public OpKernelBase { class OperatorWithKernel : public OperatorBase { public: + using OpKernelFunc = std::function; using OpKernelMap = - std::unordered_map, - OpKernelType::Hash>; + std::unordered_map; OperatorWithKernel(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) From 82866d4a1810f8a1c3a8a9b7e866a133c4fe5c4b Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Mon, 2 Jul 2018 16:54:41 +0800 Subject: [PATCH 17/34] Add register kernel functor and shrink reshape op * Shrink reshape_op library size * User can register a standard C++ functor as a op kernel --- paddle/fluid/framework/op_registry.h | 13 +++++-------- paddle/fluid/operators/reshape_op.cc | 18 +++++++++--------- paddle/fluid/operators/reshape_op.cu.cc | 16 ++++++++-------- paddle/fluid/operators/reshape_op.h | 14 ++++---------- 4 files changed, 26 insertions(+), 35 deletions(-) diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 751e150845..3314e41cc5 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -146,7 +146,7 @@ struct OpKernelRegistrarFunctorEx struct OpKernelRegistrarFunctorEx { - using KERNEL_TYPE = + using Functor = typename std::tuple_element>::type; using T = @@ -154,10 +154,7 @@ struct OpKernelRegistrarFunctorEx>::type; void operator()(const char* op_type, const char* library_type) const { - RegisterKernelClass( - op_type, library_type, [](const framework::ExecutionContext& ctx) { - KERNEL_TYPE().Compute(ctx); - }); + RegisterKernelClass(op_type, library_type, Functor()); 
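The registration change made in patches 16 and 17 stores each kernel in OperatorWithKernel::AllOpKernels() as a type-erased std::function<void(const ExecutionContext&)> rather than a heap-allocated OpKernel object, which is what allows a plain C++ functor such as the new ReshapeKernel to be registered directly. Below is a minimal, self-contained sketch of that pattern; ExecutionContext, RegisterKernel and ReshapeLikeKernel here are invented stand-ins for illustration only, not Paddle's real types or signatures.

// Minimal sketch (invented names, not Paddle's API): kernels stored as
// type-erased std::function values, so kernel classes and plain functors
// can both be registered through the same map and invoked uniformly.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct ExecutionContext {  // stand-in for framework::ExecutionContext
  std::string op_type;
};

using KernelFunc = std::function<void(const ExecutionContext&)>;

std::unordered_map<std::string, KernelFunc>& AllKernels() {
  static std::unordered_map<std::string, KernelFunc> kernels;
  return kernels;
}

// Register any callable under an op-type key, mirroring the role of
// RegisterKernelClass in the patch above.
template <typename Functor>
void RegisterKernel(const std::string& op_type, Functor func) {
  AllKernels()[op_type] = KernelFunc(func);
}

// A non-templated functor kernel, analogous to the new ReshapeKernel.
struct ReshapeLikeKernel {
  void operator()(const ExecutionContext& ctx) const {
    std::cout << "running kernel for " << ctx.op_type << "\n";
  }
};

int main() {
  RegisterKernel("reshape", ReshapeLikeKernel());
  ExecutionContext ctx{"reshape"};
  AllKernels().at("reshape")(ctx);  // dispatch through the std::function
  return 0;
}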
constexpr auto size = std::tuple_size>::value; @@ -238,11 +235,11 @@ struct OpKernelRegistrarFunctorEx("Out"); auto *in = ctx.Input("X"); @@ -147,7 +147,7 @@ void ReshapeKernel::Compute(const framework::ExecutionContext &ctx) const { out->Resize(out_dims); } } -void ReshapeGradKernelBase::Compute( +void ReshapeGradKernel::operator()( const framework::ExecutionContext &ctx) const { auto *d_out = ctx.Input(framework::GradVarName("Out")); auto *d_x = ctx.Output(framework::GradVarName("X")); @@ -172,10 +172,10 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp); -REGISTER_OP_CPU_KERNEL_EX(reshape, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int, ops::ReshapeKernel, int64_t, - ops::ReshapeKernel); -REGISTER_OP_CPU_KERNEL(reshape_grad, ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.cu.cc b/paddle/fluid/operators/reshape_op.cu.cc index 8a09321eef..374b2dbc6a 100644 --- a/paddle/fluid/operators/reshape_op.cu.cc +++ b/paddle/fluid/operators/reshape_op.cu.cc @@ -14,11 +14,11 @@ limitations under the License. */ #include "paddle/fluid/operators/reshape_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL_EX(reshape, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int, ops::ReshapeKernel, int64_t, - ops::ReshapeKernel); -REGISTER_OP_CUDA_KERNEL(reshape_grad, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel); + +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index c0b57d11d3..68e1690a53 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -118,21 +118,15 @@ class ReshapeOp : public framework::OperatorWithKernel { } }; -class ReshapeKernel : public framework::OpKernelBase { +class ReshapeKernel { public: - void Compute(const framework::ExecutionContext &ctx) const final; + void operator()(const framework::ExecutionContext &ctx) const; }; -class ReshapeGradKernelBase : public framework::OpKernelBase { +class ReshapeGradKernel { public: - void Compute(const framework::ExecutionContext &ctx) const; + void operator()(const framework::ExecutionContext &ctx) const; }; -template -class ReshapeGradKernel : public ReshapeGradKernelBase { - public: - // Tell register element type. 
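The reason a single non-templated ReshapeKernel functor can replace the templated kernel classes being deleted here is visible in the kernel body introduced in patch 15: the output is allocated with the input's runtime type (out->mutable_data(ctx.GetPlace(), in->type())) and the data is copied wholesale, so reshape never needs a compile-time element type. A small standalone sketch of that idea, using invented ToyTensor/ReshapeCopy names rather than Paddle's LoDTensor API:

// Simplified sketch of a dtype-agnostic "reshape": since reshape only
// changes the shape metadata and copies bytes, one non-templated function
// can serve every element type by working from the runtime element size
// instead of a compile-time T. Assumes the new shape has the same number
// of elements as the input.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct ToyTensor {            // stand-in for framework::LoDTensor
  std::vector<int64_t> dims;
  size_t element_size = 0;    // runtime dtype info, e.g. sizeof(float)
  std::vector<unsigned char> data;
};

// Copy the bytes and install the new shape; no template parameter needed.
void ReshapeCopy(const ToyTensor& in, const std::vector<int64_t>& new_dims,
                 ToyTensor* out) {
  out->dims = new_dims;
  out->element_size = in.element_size;
  out->data.resize(in.data.size());
  std::memcpy(out->data.data(), in.data.data(), in.data.size());
}

int main() {
  ToyTensor in{{2, 3}, sizeof(float),
               std::vector<unsigned char>(2 * 3 * sizeof(float))};
  ToyTensor out;
  ReshapeCopy(in, {3, 2}, &out);
  std::cout << "reshaped to " << out.dims[0] << "x" << out.dims[1] << "\n";
  return 0;
}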
- using ELEMENT_TYPE = T; -}; } // namespace operators } // namespace paddle From 6038a6312034f8914071c6428c7cd62dd3fed594 Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Mon, 2 Jul 2018 17:02:51 +0800 Subject: [PATCH 18/34] Fix fc mkldnn op --- paddle/fluid/operators/fc_mkldnn_op.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/operators/fc_mkldnn_op.cc b/paddle/fluid/operators/fc_mkldnn_op.cc index 847b7b0c12..99fa659a35 100644 --- a/paddle/fluid/operators/fc_mkldnn_op.cc +++ b/paddle/fluid/operators/fc_mkldnn_op.cc @@ -115,6 +115,7 @@ class MKLDNNMemory { template class FCMKLDNNOpKernel : public paddle::framework::OpKernel { + public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), "It must use CPUPlace."); From adfaf9a6657b677fab796ea223640e1375150fde Mon Sep 17 00:00:00 2001 From: Wu Yi Date: Mon, 2 Jul 2018 17:04:29 +0800 Subject: [PATCH 19/34] make transpiler test reliable (#11848) * make transpiler test reliable * add more * follow comments --- .../tests/unittests/test_dist_transpiler.py | 247 ++++++++++++++++-- .../unittests/test_simple_dist_transpiler.py | 80 ------ .../fluid/tests/unittests/transpiler_test.py | 73 ------ .../fluid/transpiler/distribute_transpiler.py | 19 +- 4 files changed, 235 insertions(+), 184 deletions(-) delete mode 100644 python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py delete mode 100644 python/paddle/fluid/tests/unittests/transpiler_test.py diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py index b4379ad447..75b4b4e50d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -15,51 +15,248 @@ import unittest import paddle.fluid as fluid from paddle.fluid.transpiler.distribute_transpiler import delete_ops +import traceback -from transpiler_test import TranspilerTest - -class TestDistTranspiler(TranspilerTest): +class TranspilerTest(unittest.TestCase): def setUp(self): - self.current_pserver_ep = "127.0.0.1:6174" + self.trainer_id = 0 + self.trainers = 2 + self.pservers = 2 + # NOTE: we do not actually bind this port + self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175" + self.pserver1_ep = "127.0.0.1:6174" + self.pserver2_ep = "127.0.0.1:6175" + self.slice_var_up = True + self.sync_mode = True + self.transpiler = None + + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) + sgd_optimizer.minimize(avg_cost) + return + + def get_main_program(self): + main = fluid.Program() + with fluid.program_guard(main): + self.net_conf() + self.origin_prog = main.clone() + return main + + def get_trainer(self): + t = self._transpiler_instance() + return t.get_trainer_program() + + def get_pserver(self, ep): + t = self._transpiler_instance() + pserver = t.get_pserver_program(ep) + startup = t.get_startup_program(ep, pserver) + return pserver, startup + + def _transpiler_instance(self): + if not self.transpiler: + main = self.get_main_program() + self.transpiler = 
fluid.DistributeTranspiler() + self.transpiler.transpile( + self.trainer_id, + program=main, + pservers=self.pserver_eps, + trainers=self.trainers, + slice_var_up=self.slice_var_up, + sync_mode=self.sync_mode) + return self.transpiler + +class TestBasicModel(TranspilerTest): def test_transpiler(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + pserver2, startup2 = self.get_pserver(self.pserver2_ep) + trainer = self.get_trainer() - pserver, startup = self.get_pserver(self.current_pserver_ep) - self.assertEqual([op.type for op in trainer.global_block().ops], - self.get_expect_trainer_ops()) + + self.assertEqual([op.type for op in trainer.global_block().ops], [ + 'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean', + 'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad', + 'elementwise_add_grad', 'send', 'mul_grad', 'split_byref', 'send', + 'send_barrier', 'recv', 'recv', 'fetch_barrier', 'concat' + ]) self.assertEqual(len(pserver.blocks), 3) # block0: listen_and_serv self.assertEqual([op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]) - # block2: optimize pass + # block1~2: optimize pass self.assertEqual([op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]) - # confirm startup program - - self.assertEqual([op.type for op in startup.global_block().ops], [ - "fill_constant", "fill_constant", "uniform_random", "uniform_random" - ]) - + self.assertEqual([op.type for op in startup.global_block().ops], + ["fill_constant", "fill_constant", "uniform_random"]) # the variable #fc_w will be split into two blocks fc_w_var = startup.global_block().var("fc_w.block1") self.assertEqual(fc_w_var.shape, (500, 1000)) + # all parameters should be optimized on pserver + + pserver_params = [] + for prog in [pserver, pserver2]: + for blk in prog.blocks: + for op in blk.ops: + if "Param" in op.input_names: + param_name = op.input("Param")[0] + is_block_idx = param_name.find(".block") + if is_block_idx != -1: + origin_param_name = param_name[:is_block_idx] + else: + origin_param_name = param_name + pserver_params.append(origin_param_name) + trainer_params = [] + for op in self.origin_prog.global_block().ops: + if "Param" in op.input_names: + trainer_params.append(op.input("Param")[0]) + self.assertEqual(set(pserver_params), set(trainer_params)) + + +class TestNoSliceVar(TranspilerTest): + def setUp(self): + super(TestNoSliceVar, self).setUp() + self.slice_var_up = False + + def test_transpiler(self): + _, startup = self.get_pserver(self.pserver1_ep) + _, startup2 = self.get_pserver(self.pserver2_ep) + + if startup.global_block().vars.has_key("fc_w"): + fc_w_var = startup.global_block().vars["fc_w"] + elif startup2.global_block().vars.has_key("fc_w"): + fc_w_var = startup2.global_block().vars["fc_w"] + + self.assertEqual(fc_w_var.shape, (1000, 1000)) - def get_expect_trainer_ops(self): - trainer = fluid.Program() - with fluid.program_guard(trainer): - optimize_ops, params_grads = self.net_conf() +class TestLRDecay(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=1.0, + decay_steps=2100, + 
decay_rate=0.1, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + return + + def test_transpiler(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + self.assertEqual(len(pserver.blocks), 4) + lr_decay_ops = [op.type for op in pserver.blocks[1].ops] + self.assertEqual(lr_decay_ops, [ + "increment", "cast", "fill_constant", "elementwise_div", "floor", + "fill_constant", "elementwise_pow", "fill_constant", + "elementwise_mul" + ]) + + +class TestLRDecayConditional(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.piecewise_decay([10000, 20000], + [1.0, 0.5, 1.0])) + sgd_optimizer.minimize(avg_cost) + return + + def test_transpiler(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + serv_op = pserver.blocks[0].ops[0] + sub_blocks = [] + optimize_blocks = [] + for b in serv_op.attrs["optimize_blocks"]: + optimize_blocks.append(b.idx) + for b in pserver.blocks: + if b.idx not in optimize_blocks: + sub_blocks.append(b.idx) + + self.assertEqual(len(pserver.blocks), 7) + lr_decay_ops = [op.type for op in pserver.blocks[1].ops] + self.assertEqual(lr_decay_ops, [ + "increment", "cast", "fill_constant", "fill_constant", "less_than", + "logical_not", "conditional_block", "fill_constant", + "fill_constant", "less_than", "logical_not", "logical_and", + "logical_and", "conditional_block", "fill_constant", + "conditional_block" + ]) + # test the condition blocks + for b in sub_blocks: + if b == 0: + continue + block = pserver.blocks[b] + self.assertEqual([op.type for op in block.ops], ["assign"]) + + +class TestL2Decay(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr( + name='fc_w', + regularizer=fluid.regularizer.L2Decay(), + gradient_clip=fluid.clip.GradientClipByValue(0.1)), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) + sgd_optimizer.minimize(avg_cost) + return + + def test_transpiler(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + self.assertEqual(len(pserver.blocks), 3) + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "clip", "sgd"]) + self.assertEqual( + [op.type for op in pserver.blocks[2].ops], + ["sum", "scale", "clip", "scale", "elementwise_add", "sgd"]) + # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer + - delete_ops(trainer.global_block(), optimize_ops) - ops = [op.type for op in trainer.global_block().ops] + [ - "split_byref", "send", "send_barrier", "recv", "recv", - "fetch_barrier", "concat" - ] - ops.insert(ops.index("elementwise_add_grad") + 1, "send") - return ops + # FIXME(typhoonzero): need to add test for async case: + # see https://github.com/PaddlePaddle/Paddle/issues/11691 +class 
TestAsyncSGD(TranspilerTest): + pass if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py deleted file mode 100644 index f4aa7426bc..0000000000 --- a/python/paddle/fluid/tests/unittests/test_simple_dist_transpiler.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np - -import paddle.fluid as fluid -from paddle.fluid.transpiler.distribute_transpiler import delete_ops - -from transpiler_test import TranspilerTest - - -class TestSimpleDistTranspiler(TranspilerTest): - def setUp(self): - self.current_pserver_ep = "127.0.0.1:6175" - - def test_simple_transpiler(self): - np.random.seed(1) - - trainer = self.get_trainer() - pserver, startup = self.get_pserver(self.current_pserver_ep) - self.assertEqual([op.type for op in trainer.global_block().ops], - self.get_expect_trainer_ops()) - - self.assertEqual(len(pserver.blocks), 2) - # block0: listen_and_serv - self.assertEqual([op.type for op in pserver.blocks[0].ops], - ["listen_and_serv"]) - # block1: optimize pass - self.assertEqual([op.type for op in pserver.blocks[1].ops], - ["sum", "scale", "sgd"]) - - # confirm startup program - self.assertEqual([op.type for op in startup.global_block().ops], - ["fill_constant", "uniform_random", "uniform_random"]) - - # the variable #fc_w will NOT be splited - fc_w_var = startup.global_block().var("fc_w@GRAD") - self.assertEqual(fc_w_var.shape, (1000, 1000)) - - fc_w_var = startup.global_block().var("fc_w@GRAD.trainer_0") - self.assertEqual(fc_w_var.shape, (1000, 1000)) - - def get_expect_trainer_ops(self): - trainer = fluid.Program() - - with fluid.program_guard(trainer): - optimize_ops, params_grads = self.net_conf() - - delete_ops(trainer.global_block(), optimize_ops) - ops = [op.type for op in trainer.global_block().ops] + [ - "send", "send_barrier", "recv", "recv", "fetch_barrier" - ] - ops.insert(ops.index("elementwise_add_grad") + 1, "send") - return ops - - def _transpiler_instance(self): - main = self.get_main_program() - t = fluid.DistributeTranspiler() - t.transpile( - self.trainer_id, - program=main, - pservers=self.pserver_eps, - trainers=self.trainers, - slice_var_up=False) - return t - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/transpiler_test.py b/python/paddle/fluid/tests/unittests/transpiler_test.py deleted file mode 100644 index d84c5d9c41..0000000000 --- a/python/paddle/fluid/tests/unittests/transpiler_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -import numpy as np - -import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.layers as layers - - -class TranspilerTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.trainer_id = 0 - self.trainers = 2 - self.pservers = 2 - self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175" - - def net_conf(self): - x = fluid.layers.data(name='x', shape=[1000], dtype='float32') - - y_predict = fluid.layers.fc(input=x, - size=1000, - act=None, - param_attr=fluid.ParamAttr(name='fc_w')) - - y = fluid.layers.data(name='y', shape=[1], dtype='float32') - - cost = fluid.layers.square_error_cost(input=y_predict, label=y) - avg_cost = fluid.layers.mean(cost) - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) - - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) - return optimize_ops, params_grads - - def get_main_program(self): - main = fluid.Program() - - with fluid.program_guard(main): - self.net_conf() - - return main - - def get_trainer(self): - return self._transpiler_instance().get_trainer_program() - - def get_pserver(self, ep): - t = self._transpiler_instance() - pserver = t.get_pserver_program(ep) - startup = t.get_startup_program(ep, pserver) - return pserver, startup - - def _transpiler_instance(self): - main = self.get_main_program() - t = fluid.DistributeTranspiler() - t.transpile( - self.trainer_id, - program=main, - pservers=self.pserver_eps, - trainers=self.trainers) - return t diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 343901cda3..05fed72ee6 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -455,6 +455,8 @@ class DistributeTranspiler(object): __append_optimize_op__(op, per_opt_block, grad_to_block_id, merged_var, lr_ops) + # dedup grad to ids list + grad_to_block_id = list(set(grad_to_block_id)) # append global ops if global_ops: opt_state_block = pserver_program.create_block( @@ -960,8 +962,6 @@ class DistributeTranspiler(object): if not block_map.has_key(varname): block_map[varname] = [] block_map[varname].append((long(offset), long(size))) - # Do not remove this important debug message: - print("block map: %s" % block_map) for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) @@ -1401,6 +1401,16 @@ class DistributeTranspiler(object): break return lr_ops + def _is_opt_role_op(self, op): + # NOTE: depend on oprole to find out whether this op is for + # optimize + op_maker = core.op_proto_and_checker_maker + optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize + if op_maker.kOpRoleAttrName() in op.attrs and \ + int(op.attrs[op_maker.kOpRoleAttrName()]) == int(optimize_role): + return True + return False + def _get_optimize_pass(self): """ Get optimizer operators, paramters and gradients from origin_program @@ -1413,10 +1423,7 @@ class DistributeTranspiler(object): params_grads = [] origin_var_dict = self.origin_program.global_block().vars for op in block.ops: - # NOTE(Yancey1989): we 
can not use op role to distinguish an optimizer op - # or not, because all ops in optimizer sub-graph would - # sign the optimizer op role - if self._is_optimizer_op(op): + if self._is_opt_role_op(op): opt_ops.append(op) # HACK(wuyi): if we find grad vars from input of optimize # ops, we may get the output of clip op. Use syntax "@GRAD" From 550ab8d7236b30d716ce4a44d2a679cee16434a5 Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Mon, 2 Jul 2018 17:23:42 +0800 Subject: [PATCH 20/34] Use single file than multiple files --- paddle/fluid/operators/reshape_op.cc | 216 ++++++++++++++++++------ paddle/fluid/operators/reshape_op.cu.cc | 24 --- paddle/fluid/operators/reshape_op.h | 132 --------------- 3 files changed, 164 insertions(+), 208 deletions(-) delete mode 100644 paddle/fluid/operators/reshape_op.cu.cc delete mode 100644 paddle/fluid/operators/reshape_op.h diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 6e384e9060..918f3be533 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -12,14 +12,108 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/reshape_op.h" - #include #include +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { +class ReshapeOp : public framework::OperatorWithKernel { + public: + ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ReshapeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ReshapeOp should not be null."); + + const std::vector &shape = ctx->Attrs().Get>("shape"); + PADDLE_ENFORCE(!shape.empty(), + "The shape information must be set by Attr(shape)."); + + if (ctx->HasInput("Shape") && ctx->IsRuntime()) { + // If true, set the shape of Output(Out) according to Input(Shape) in + // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel. + ctx->ShareLoD("X", /*->*/ "Out"); + return; + } + + auto x_dims = ctx->GetInputDim("X"); + auto out_dims = ValidateShape(shape, x_dims); + ctx->SetOutputDim("Out", out_dims); + if (x_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same. + ctx->ShareLoD("X", /*->*/ "Out"); + } + } + + static framework::DDim ValidateShape(const std::vector shape, + const framework::DDim &in_dims) { + const int64_t in_size = framework::product(in_dims); + // only one dimension can be set to -1, whose size will be automatically + // infered. 
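A minimal standalone sketch of the shape-resolution rule the comment above describes (at most one -1 entry, inferred from the remaining capacity, and 0 entries that copy the corresponding input dimension). This is an illustration only, not the operator code; the ResolveShape name and the use of plain std::vector are assumptions made for the sketch.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Resolve a reshape target: -1 is inferred, 0 copies the input dimension.
std::vector<int64_t> ResolveShape(const std::vector<int64_t>& in_dims,
                                  const std::vector<int64_t>& shape) {
  int64_t in_size = 1;
  for (int64_t d : in_dims) in_size *= d;

  std::vector<int64_t> out(shape.size(), 0);
  int64_t capacity = 1;
  int unk_idx = -1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      assert(unk_idx == -1 && "only one dimension may be -1");
      unk_idx = static_cast<int>(i);
      out[i] = -1;
    } else if (shape[i] == 0) {
      assert(i < in_dims.size() && "0 must refer to an existing input dim");
      out[i] = in_dims[i];
    } else {
      out[i] = shape[i];
    }
    if (out[i] > 0) capacity *= out[i];
  }
  if (unk_idx >= 0) {
    // The unknown dimension absorbs whatever capacity is left over.
    assert(in_size % capacity == 0 && "capacity must divide the input size");
    out[unk_idx] = in_size / capacity;
  } else {
    assert(capacity == in_size && "element counts must match exactly");
  }
  return out;
}

int main() {
  // 4 x 3 x 2 = 24 elements, target {0, -1}: keep dim 0, infer the rest.
  std::vector<int64_t> out = ResolveShape({4, 3, 2}, {0, -1});
  std::cout << out[0] << " x " << out[1] << std::endl;  // prints "4 x 6"
  return 0;
}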
+ const int64_t unk_dim_val = -1; + const int64_t copy_dim_val = 0; + + std::vector output_shape(shape.size(), 0); + int64_t capacity = 1; + int unk_dim_idx = -1; + for (size_t i = 0; i < shape.size(); ++i) { + if (shape[i] == unk_dim_val) { + PADDLE_ENFORCE( + unk_dim_idx == -1, + "Only one input dimension of Attr(shape) can be unknown."); + unk_dim_idx = i; + } else if (shape[i] == copy_dim_val) { + PADDLE_ENFORCE( + static_cast(i) < in_dims.size(), + "The index of dimension to copy from input shape must be less " + "than the size of input shape."); + } else { + PADDLE_ENFORCE( + shape[i] > 0, + "Each input dimension of Attr(shape) must not be negtive except " + "one unknown dimension."); + } + + capacity *= (shape[i] ? shape[i] : in_dims[i]); + output_shape[i] = + (shape[i] ? static_cast(shape[i]) : in_dims[i]); + } + + if (unk_dim_idx != -1) { + if (in_size > 0) { + // in_size < 0 and is un-determinate in compile time, skip the check, + // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], + // capacity = -24, in_size = -8, output_shape[0] = 0 + // the following check will fail. + output_shape[unk_dim_idx] = -in_size / capacity; + PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, + "Invalid shape is given."); + } else { + output_shape[unk_dim_idx] = -1; + } + } else { + PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); + } + return framework::make_ddim(output_shape); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; + class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { @@ -107,64 +201,72 @@ class ReshapeGradOp : public framework::OperatorWithKernel { } }; -void ReshapeKernel::operator()(const framework::ExecutionContext &ctx) const { - auto *out = ctx.Output("Out"); - auto *in = ctx.Input("X"); +class ReshapeKernel { + public: + void operator()(const framework::ExecutionContext &ctx) const { + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); - auto *shape_tensor = ctx.HasInput("Shape") - ? ctx.Input("Shape") - : nullptr; + auto *shape_tensor = ctx.HasInput("Shape") + ? ctx.Input("Shape") + : nullptr; - framework::DDim out_dims = out->dims(); + framework::DDim out_dims = out->dims(); - if (shape_tensor) { - auto *shape_data = shape_tensor->data(); - framework::Tensor cpu_shape_tensor; - if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); - shape_data = cpu_shape_tensor.data(); + if (shape_tensor) { + auto *shape_data = shape_tensor->data(); + framework::Tensor cpu_shape_tensor; + if (platform::is_gpu_place(ctx.GetPlace())) { + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); + shape_data = cpu_shape_tensor.data(); + } + auto shape = + std::vector(shape_data, shape_data + shape_tensor->numel()); + out_dims = ReshapeOp::ValidateShape(shape, in->dims()); + } + if (!in->lod().empty()) { + PADDLE_ENFORCE_EQ( + out_dims[0], in->dims()[0], + "Reshape operator cannot reshape an input sequence batch " + "into an output sequence batch that has a different " + "number of time steps. 
Please consider using " + "sequence_reshape op."); } - auto shape = - std::vector(shape_data, shape_data + shape_tensor->numel()); - out_dims = ReshapeOp::ValidateShape(shape, in->dims()); - } - if (!in->lod().empty()) { - PADDLE_ENFORCE_EQ(out_dims[0], in->dims()[0], - "Reshape operator cannot reshape an input sequence batch " - "into an output sequence batch that has a different " - "number of time steps. Please consider using " - "sequence_reshape op."); - } - bool inplace = ctx.Attr("inplace"); - out->Resize(out_dims); - if (!inplace) { - out->mutable_data(ctx.GetPlace(), in->type()); - framework::TensorCopySync(*in, ctx.GetPlace(), out); - out->Resize(out_dims); - } else { - out->ShareDataWith(*in); + bool inplace = ctx.Attr("inplace"); out->Resize(out_dims); + if (!inplace) { + out->mutable_data(ctx.GetPlace(), in->type()); + framework::TensorCopySync(*in, ctx.GetPlace(), out); + out->Resize(out_dims); + } else { + out->ShareDataWith(*in); + out->Resize(out_dims); + } } -} -void ReshapeGradKernel::operator()( - const framework::ExecutionContext &ctx) const { - auto *d_out = ctx.Input(framework::GradVarName("Out")); - auto *d_x = ctx.Output(framework::GradVarName("X")); - - d_x->mutable_data(ctx.GetPlace(), d_out->type()); - bool inplace = ctx.Attr("inplace"); - - auto in_dims = d_x->dims(); - if (!inplace) { - framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); - ctx.device_context().Wait(); - d_x->Resize(in_dims); - } else { - d_x->ShareDataWith(*d_out); - d_x->Resize(in_dims); +}; + +class ReshapeGradKernel { + public: + void operator()(const framework::ExecutionContext &ctx) const { + auto *d_out = ctx.Input(framework::GradVarName("Out")); + auto *d_x = ctx.Output(framework::GradVarName("X")); + + d_x->mutable_data(ctx.GetPlace(), d_out->type()); + bool inplace = ctx.Attr("inplace"); + + auto in_dims = d_x->dims(); + if (!inplace) { + framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x); + ctx.device_context().Wait(); + d_x->Resize(in_dims); + } else { + d_x->ShareDataWith(*d_out); + d_x->Resize(in_dims); + } } -} +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; @@ -179,3 +281,13 @@ REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, double, ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel); + +#ifdef PADDLE_WITH_CUDA +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); +#endif diff --git a/paddle/fluid/operators/reshape_op.cu.cc b/paddle/fluid/operators/reshape_op.cu.cc deleted file mode 100644 index 374b2dbc6a..0000000000 --- a/paddle/fluid/operators/reshape_op.cu.cc +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
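The #ifdef PADDLE_WITH_CUDA block added above is what allows the separate .cu.cc and .h files below to be deleted: the CPU and GPU registrations now live in one translation unit. A generic, hedged sketch of that consolidation pattern follows; the REGISTER_* macros and the WITH_GPU_BUILD flag are placeholders for illustration, not Paddle's macros.

#include <iostream>

// A device-agnostic functor-style kernel, analogous to the functor kernels above.
struct AddOneKernel {
  void operator()(int* x) const { *x += 1; }
};

// Registration that is always compiled.
#define REGISTER_CPU_KERNEL(name, functor) static functor name##_cpu_kernel

// The GPU registration only exists when the build flag is set, so a single
// source file serves both configurations.
#ifdef WITH_GPU_BUILD
#define REGISTER_GPU_KERNEL(name, functor) static functor name##_gpu_kernel
#else
#define REGISTER_GPU_KERNEL(name, functor)
#endif

REGISTER_CPU_KERNEL(add_one, AddOneKernel);
REGISTER_GPU_KERNEL(add_one, AddOneKernel);

int main() {
  int v = 41;
  add_one_cpu_kernel(&v);
  std::cout << v << std::endl;  // prints 42
  return 0;
}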
-See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/reshape_op.h" -namespace ops = paddle::operators; - -REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int, ops::ReshapeKernel, - int64_t, ops::ReshapeKernel); -REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, - double, ops::ReshapeGradKernel, int, - ops::ReshapeGradKernel, int64_t, - ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h deleted file mode 100644 index 68e1690a53..0000000000 --- a/paddle/fluid/operators/reshape_op.h +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include - -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -class ReshapeOp : public framework::OperatorWithKernel { - public: - ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorWithKernel(type, inputs, outputs, attrs) {} - - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ReshapeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ReshapeOp should not be null."); - - const std::vector &shape = ctx->Attrs().Get>("shape"); - PADDLE_ENFORCE(!shape.empty(), - "The shape information must be set by Attr(shape)."); - - if (ctx->HasInput("Shape") && ctx->IsRuntime()) { - // If true, set the shape of Output(Out) according to Input(Shape) in - // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel. - ctx->ShareLoD("X", /*->*/ "Out"); - return; - } - - auto x_dims = ctx->GetInputDim("X"); - auto out_dims = ValidateShape(shape, x_dims); - ctx->SetOutputDim("Out", out_dims); - if (x_dims[0] == out_dims[0]) { - // Only pass LoD when the first dimension of output and Input(X) - // are the same. - ctx->ShareLoD("X", /*->*/ "Out"); - } - } - - static framework::DDim ValidateShape(const std::vector shape, - const framework::DDim &in_dims) { - const int64_t in_size = framework::product(in_dims); - // only one dimension can be set to -1, whose size will be automatically - // infered. 
- const int64_t unk_dim_val = -1; - const int64_t copy_dim_val = 0; - - std::vector output_shape(shape.size(), 0); - int64_t capacity = 1; - int unk_dim_idx = -1; - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i] == unk_dim_val) { - PADDLE_ENFORCE( - unk_dim_idx == -1, - "Only one input dimension of Attr(shape) can be unknown."); - unk_dim_idx = i; - } else if (shape[i] == copy_dim_val) { - PADDLE_ENFORCE( - static_cast(i) < in_dims.size(), - "The index of dimension to copy from input shape must be less " - "than the size of input shape."); - } else { - PADDLE_ENFORCE( - shape[i] > 0, - "Each input dimension of Attr(shape) must not be negtive except " - "one unknown dimension."); - } - - capacity *= (shape[i] ? shape[i] : in_dims[i]); - output_shape[i] = - (shape[i] ? static_cast(shape[i]) : in_dims[i]); - } - - if (unk_dim_idx != -1) { - if (in_size > 0) { - // in_size < 0 and is un-determinate in compile time, skip the check, - // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], - // capacity = -24, in_size = -8, output_shape[0] = 0 - // the following check will fail. - output_shape[unk_dim_idx] = -in_size / capacity; - PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, - "Invalid shape is given."); - } else { - output_shape[unk_dim_idx] = -1; - } - } else { - PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); - } - return framework::make_ddim(output_shape); - } - - protected: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), - ctx.device_context()); - } -}; - -class ReshapeKernel { - public: - void operator()(const framework::ExecutionContext &ctx) const; -}; - -class ReshapeGradKernel { - public: - void operator()(const framework::ExecutionContext &ctx) const; -}; - -} // namespace operators -} // namespace paddle From 89970d87b9aca6097f3e736ad607cf9d43ae7b93 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 2 Jul 2018 20:45:39 +0800 Subject: [PATCH 21/34] Change WITH_ANAKIN to OFF --- paddle/scripts/paddle_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index b66a05aaeb..d8f0b76b7b 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -106,7 +106,7 @@ function cmake_gen() { -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DWITH_CONTRIB=${WITH_CONTRIB:-ON} - -DWITH_ANAKIN=${WITH_ANAKIN:-ON} + -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} -DWITH_INFERENCE_DEMO=${WITH_INFERENCE_DEMO:-ON} ======================================== EOF @@ -135,7 +135,7 @@ EOF -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \ - -DWITH_ANAKIN=${WITH_ANAKIN:-ON} \ + -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} \ -DWITH_INFERENCE_DEMO=${WITH_INFERENCE_DEMO:-ON} } From 6335889e97b9f6a24dd2af33ed91cf5cee97f1b7 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 2 Jul 2018 21:20:27 +0800 Subject: [PATCH 22/34] Change Sprintf back --- paddle/fluid/string/printf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index e0f6202506..062095a1c3 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -84,7 +84,7 @@ void Fprintf(std::ostream& out, const char* fmt, const Args&... 
args) { } template -std::string Sprintf(const char* fmt = "", const Args&... args) { +std::string Sprintf(const char* fmt, const Args&... args) { std::ostringstream oss; Fprintf(oss, fmt, args...); return oss.str(); From 3fab4f65a46f6393de1238b808445dbbb0c3fc33 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Jul 2018 00:01:45 +0800 Subject: [PATCH 23/34] Add EOFException to represent EOF in C++ reader --- .../details/data_balance_op_handle.cc | 2 +- .../details/threaded_ssa_graph_executor.cc | 21 ++++++++++++++++--- .../details/threaded_ssa_graph_executor.h | 2 +- paddle/fluid/operators/read_op.cc | 2 +- paddle/fluid/platform/enforce.h | 16 +++++++++++++- paddle/fluid/pybind/exception.cc | 3 +++ .../tests/unittests/test_data_balance.py | 6 ++---- .../tests/unittests/test_multi_file_reader.py | 3 +-- .../tests/unittests/test_multi_pass_reader.py | 3 +-- .../tests/unittests/test_recordio_reader.py | 3 +-- 10 files changed, 44 insertions(+), 17 deletions(-) diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index b914851fe0..d07235df58 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -62,7 +62,7 @@ std::vector> DataBalanceOpHandle::GetBalancePlan( } if (total_size < device_num) { // No enough data. - PADDLE_THROW("There is no next data."); + PADDLE_THROW_EOF(); } std::sort(size_device_vec.begin(), size_device_vec.end(), [](const std::array &a, const std::array &b) { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index b1706eb12d..99b10254a7 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -98,9 +98,18 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( if (timeout) { std::lock_guard l(exception_mu_); if (exception_) { - auto exp = *exception_; - exception_.reset(); - throw exp; + std::exception *exp = exception_.get(); + if (dynamic_cast(exp)) { + auto e = *static_cast(exp); + exception_.reset(); + throw e; + } else if (dynamic_cast(exp)) { + auto e = *static_cast(exp); + exception_.reset(); + throw e; + } else { + LOG(FATAL) << "Unknown exception."; + } } else { continue; } @@ -199,6 +208,12 @@ void ThreadedSSAGraphExecutor::RunOp( running_ops_--; ready_var_q->Extend(op->Outputs()); VLOG(10) << op << " " << op->Name() << "Signal posted"; + } catch (platform::EOFException ex) { + std::lock_guard l(exception_mu_); + // EOFException will not cover up existing EnforceNotMet. 
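A standalone sketch of the store-and-rethrow pattern used here: the worker records the exception in a shared std::unique_ptr<std::exception>, and the run loop later re-throws the most derived type it recognizes via dynamic_cast, so an EOF can be told apart from a hard failure. The EofError/EnforceError names below are placeholders for illustration, not the Paddle types.

#include <iostream>
#include <memory>
#include <stdexcept>

struct EofError : std::runtime_error {
  EofError() : std::runtime_error("There is no next data.") {}
};
struct EnforceError : std::runtime_error {
  using std::runtime_error::runtime_error;
};

// Re-throw whichever concrete type the holder currently contains.
void Rethrow(std::unique_ptr<std::exception>* holder) {
  std::exception* exp = holder->get();
  if (auto* eof = dynamic_cast<EofError*>(exp)) {
    EofError e = *eof;  // copy out before resetting the holder
    holder->reset();
    throw e;
  } else if (auto* enf = dynamic_cast<EnforceError*>(exp)) {
    EnforceError e = *enf;
    holder->reset();
    throw e;
  }
}

int main() {
  std::unique_ptr<std::exception> holder;
  try {
    throw EofError();  // simulates the reader running out of data
  } catch (const EofError& ex) {
    // As in the catch block above: an EOF never overwrites a recorded error.
    if (holder == nullptr) holder.reset(new EofError(ex));
  }
  try {
    Rethrow(&holder);
  } catch (const EofError& e) {
    std::cout << "caught EOF: " << e.what() << std::endl;
  }
  return 0;
}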
+ if (exception_.get() == nullptr) { + exception_.reset(new platform::EOFException(ex)); + } } catch (platform::EnforceNotMet ex) { std::lock_guard l(exception_mu_); exception_.reset(new platform::EnforceNotMet(ex)); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index 90430be996..c69e0487e2 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -57,7 +57,7 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::vector places_; platform::DeviceContextPool fetch_ctxs_; std::mutex exception_mu_; - std::unique_ptr exception_; + std::unique_ptr exception_; std::atomic running_ops_; void InsertPendingOp(std::unordered_map *pending_ops, diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 60e4eb7576..695d7ea83d 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -68,7 +68,7 @@ class ReadOp : public framework::OperatorBase { reader->ReadNext(&ins); if (ins.empty()) { if (Attr("throw_eof_exp")) { - PADDLE_THROW("There is no next data."); + PADDLE_THROW_EOF(); } else { ins.resize(out_arg_names.size()); for (auto& tensor : ins) { diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 70bc9c4e83..3790dd1352 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -73,7 +73,7 @@ struct EnforceNotMet : public std::exception { } catch (const std::exception& exp) { std::ostringstream sout; - sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; + sout << string::Sprintf("'%s' at [%s:%d]", exp.what(), f, l) << std::endl; sout << "PaddlePaddle Call Stacks: " << std::endl; void* call_stack[TRACE_STACK_LIMIT]; @@ -102,6 +102,15 @@ struct EnforceNotMet : public std::exception { const char* what() const noexcept { return err_str_.c_str(); } }; +struct EOFException : public std::exception { + std::string err_str_; + EOFException(const char* err_msg, const char* f, int l) { + err_str_ = string::Sprintf("'%s' at [%s:%d]", err_msg, f, l); + } + + const char* what() const noexcept { return err_str_.c_str(); } +}; + // Because most enforce conditions would evaluate to true, we can use // __builtin_expect to instruct the C++ compiler to generate code that // always forces branch prediction of true. @@ -242,6 +251,11 @@ inline void throw_on_error(T e) { #define PADDLE_ENFORCE(...) 
::paddle::platform::throw_on_error(__VA_ARGS__); #endif +#define PADDLE_THROW_EOF() \ + do { \ + throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \ + __LINE__); \ + } while (false) /* * Some enforce helpers here, usage: * int a = 1; diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc index 08a2f185e1..831f30e35f 100644 --- a/paddle/fluid/pybind/exception.cc +++ b/paddle/fluid/pybind/exception.cc @@ -18,10 +18,13 @@ namespace paddle { namespace pybind { void BindException(pybind11::module* m) { + static pybind11::exception eof(*m, "EOFException"); static pybind11::exception exc(*m, "EnforceNotMet"); pybind11::register_exception_translator([](std::exception_ptr p) { try { if (p) std::rethrow_exception(p); + } catch (const platform::EOFException& e) { + eof(e.what()); } catch (const platform::EnforceNotMet& e) { exc(e.what()); } diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py index b558d7c2ea..cffa3329ac 100644 --- a/python/paddle/fluid/tests/unittests/test_data_balance.py +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -118,8 +118,7 @@ class TestDataBalance(unittest.TestCase): try: image_val, label_val = parallel_exe.run(fetch_list, return_numpy=True) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break ins_num = image_val.shape[0] broadcasted_label = np.ones( @@ -162,8 +161,7 @@ class TestDataBalance(unittest.TestCase): try: ins_tensor, label_tensor = parallel_exe.run( fetch_list, return_numpy=False) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break ins_val = np.array(ins_tensor) diff --git a/python/paddle/fluid/tests/unittests/test_multi_file_reader.py b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py index 3f940203b9..dbd510e64f 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_file_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py @@ -64,8 +64,7 @@ class TestMultipleReader(unittest.TestCase): while True: try: img_val, = exe.run(fetch_list=[img]) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) diff --git a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py index 52e7cc1ffb..7fc9f55044 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py @@ -59,8 +59,7 @@ class TestMultipleReader(unittest.TestCase): while True: try: img_val, = exe.run(fetch_list=[img]) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py index f32050014d..69a522e273 100644 --- a/python/paddle/fluid/tests/unittests/test_recordio_reader.py +++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py @@ -68,8 +68,7 @@ class TestRecordIO(unittest.TestCase): while True: try: tmp, = exe.run(fetch_list=[avg_loss]) - except fluid.core.EnforceNotMet as ex: - 
self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break avg_loss_np.append(tmp) From 8553ac6a9568b9cb7739707368a587e089535f43 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Jul 2018 10:24:18 +0800 Subject: [PATCH 24/34] fix unittests --- paddle/fluid/platform/enforce.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 3790dd1352..566485cd3c 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -73,7 +73,7 @@ struct EnforceNotMet : public std::exception { } catch (const std::exception& exp) { std::ostringstream sout; - sout << string::Sprintf("'%s' at [%s:%d]", exp.what(), f, l) << std::endl; + sout << string::Sprintf("%s at [%s:%d]", exp.what(), f, l) << std::endl; sout << "PaddlePaddle Call Stacks: " << std::endl; void* call_stack[TRACE_STACK_LIMIT]; @@ -105,7 +105,7 @@ struct EnforceNotMet : public std::exception { struct EOFException : public std::exception { std::string err_str_; EOFException(const char* err_msg, const char* f, int l) { - err_str_ = string::Sprintf("'%s' at [%s:%d]", err_msg, f, l); + err_str_ = string::Sprintf("%s at [%s:%d]", err_msg, f, l); } const char* what() const noexcept { return err_str_.c_str(); } From ed4b2475f547a88f14f1b4d0d15f7411bf70b5f2 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 3 Jul 2018 11:01:03 +0800 Subject: [PATCH 25/34] add an unittest --- paddle/fluid/platform/enforce_test.cc | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 57d751cc00..0e8684581a 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -210,3 +210,14 @@ TEST(ENFORCE_USER_DEFINED_CLASS, NE) { Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}}; ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet); } + +TEST(EOF_EXCEPTION, THROW_EOF) { + bool caught_eof = false; + try { + PADDLE_THROW_EOF(); + } catch (paddle::platform::EOFException error) { + caught_eof = true; + EXPECT_TRUE(HasPrefix(StringPiece(error.what()), "There is no next data.")); + } + EXPECT_TRUE(caught_eof); +} From aa2f76fd9bb635f921a889b03ec4032e92d6df41 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Mon, 2 Jul 2018 11:25:16 +0800 Subject: [PATCH 26/34] move trainer --- .../cluster_train/large_model_dist_train.md | 2 +- doc/v2/design/mkl/mkldnn.md | 40 +++++++++--------- doc/v2/dev/new_layer_en.rst | 2 +- paddle/CMakeLists.txt | 2 +- paddle/legacy/api/ConfigParser.cpp | 2 +- paddle/legacy/api/PaddleAPIPrivate.h | 2 +- paddle/legacy/api/ParameterUpdater.cpp | 6 +-- paddle/legacy/api/Trainer.cpp | 6 +-- paddle/legacy/capi/Main.cpp | 2 +- .../capi/tests/test_GradientMachine.cpp | 2 +- paddle/legacy/gserver/tests/MKLDNNTester.cpp | 2 +- .../gserver/tests/test_CompareSparse.cpp | 2 +- .../gserver/tests/test_CompareTwoNets.cpp | 2 +- .../legacy/gserver/tests/test_Evaluator.cpp | 2 +- .../gserver/tests/test_NetworkCompare.cpp | 2 +- .../tests/test_RecurrentGradientMachine.cpp | 4 +- paddle/{ => legacy}/trainer/CMakeLists.txt | 0 paddle/{ => legacy}/trainer/MergeModel.cpp | 0 .../trainer/NewRemoteParameterUpdater.cpp | 0 .../trainer/NewRemoteParameterUpdater.h | 0 paddle/{ => legacy}/trainer/ParamUtil.cpp | 0 paddle/{ => legacy}/trainer/ParamUtil.h | 0 .../{ => legacy}/trainer/ParameterUpdater.cpp | 0 .../{ => legacy}/trainer/ParameterUpdater.h | 0 .../trainer/RemoteParameterUpdater.cpp | 0 
.../trainer/RemoteParameterUpdater.h | 0 paddle/{ => legacy}/trainer/Tester.cpp | 0 paddle/{ => legacy}/trainer/Tester.h | 0 paddle/{ => legacy}/trainer/TesterConfig.h | 0 .../trainer/ThreadParameterUpdater.cpp | 0 .../trainer/ThreadParameterUpdater.h | 0 paddle/{ => legacy}/trainer/Trainer.cpp | 0 paddle/{ => legacy}/trainer/Trainer.h | 0 .../{ => legacy}/trainer/TrainerBenchmark.cpp | 0 .../trainer/TrainerConfigHelper.cpp | 0 .../trainer/TrainerConfigHelper.h | 0 .../{ => legacy}/trainer/TrainerInternal.cpp | 0 paddle/{ => legacy}/trainer/TrainerInternal.h | 0 .../trainer/TrainerInternalConfig.cpp | 0 .../trainer/TrainerInternalConfig.h | 0 paddle/{ => legacy}/trainer/TrainerMain.cpp | 0 paddle/{ => legacy}/trainer/tests/.gitignore | 0 .../{ => legacy}/trainer/tests/CMakeLists.txt | 4 +- paddle/{ => legacy}/trainer/tests/__init__.py | 0 .../trainer/tests/config_parser_test.py | 4 +- .../trainer/tests/fake_file_list.list | 0 paddle/{ => legacy}/trainer/tests/picojson.h | 0 .../test_pydata_provider_wrapper.data | 0 .../test_pydata_provider_wrapper.list | 1 + .../tests/rnn_gen_test_model_dir/r1.test.beam | 0 .../tests/rnn_gen_test_model_dir/r1.test.nest | 0 .../rnn_gen_test_model_dir/r1.test.nobeam | 0 .../rnn_gen_test_model_dir/t1/transtable | Bin .../tests/rnn_gen_test_model_dir/t1/wordvec | Bin .../trainer/tests/sample_data.txt | 0 .../legacy/trainer/tests/sample_filelist.txt | 1 + .../trainer/tests/sample_trainer_config.conf | 4 +- .../tests/sample_trainer_config_hsigmoid.conf | 2 +- .../tests/sample_trainer_config_parallel.conf | 4 +- .../tests/sample_trainer_nest_rnn_gen.conf | 4 +- .../trainer/tests/sample_trainer_rnn_gen.conf | 4 +- .../tests/simple_sparse_neural_network.py | 2 +- .../tests/simple_sparse_neural_network_dp.py | 0 .../trainer/tests/testPyDataWrapper.py | 0 .../trainer/tests/test_Compare.cpp | 5 ++- .../tests/test_PyDataProviderWrapper.cpp | 2 +- .../trainer/tests/test_Trainer.cpp | 9 ++-- .../trainer/tests/test_TrainerOnePass.cpp | 11 ++--- .../trainer/tests/test_config.conf | 2 +- .../trainer/tests/test_gen_dict.txt | 0 .../test_recurrent_machine_generation.cpp | 16 ++++--- .../test_pydata_provider_wrapper.list | 1 - paddle/trainer/tests/sample_filelist.txt | 1 - python/paddle/trainer/config_parser.py | 2 +- python/setup.py.in | 4 +- tools/codestyle/cpplint_pre_commit.hook | 2 +- 76 files changed, 85 insertions(+), 80 deletions(-) rename paddle/{ => legacy}/trainer/CMakeLists.txt (100%) rename paddle/{ => legacy}/trainer/MergeModel.cpp (100%) rename paddle/{ => legacy}/trainer/NewRemoteParameterUpdater.cpp (100%) rename paddle/{ => legacy}/trainer/NewRemoteParameterUpdater.h (100%) rename paddle/{ => legacy}/trainer/ParamUtil.cpp (100%) rename paddle/{ => legacy}/trainer/ParamUtil.h (100%) rename paddle/{ => legacy}/trainer/ParameterUpdater.cpp (100%) rename paddle/{ => legacy}/trainer/ParameterUpdater.h (100%) rename paddle/{ => legacy}/trainer/RemoteParameterUpdater.cpp (100%) rename paddle/{ => legacy}/trainer/RemoteParameterUpdater.h (100%) rename paddle/{ => legacy}/trainer/Tester.cpp (100%) rename paddle/{ => legacy}/trainer/Tester.h (100%) rename paddle/{ => legacy}/trainer/TesterConfig.h (100%) rename paddle/{ => legacy}/trainer/ThreadParameterUpdater.cpp (100%) rename paddle/{ => legacy}/trainer/ThreadParameterUpdater.h (100%) rename paddle/{ => legacy}/trainer/Trainer.cpp (100%) rename paddle/{ => legacy}/trainer/Trainer.h (100%) rename paddle/{ => legacy}/trainer/TrainerBenchmark.cpp (100%) rename paddle/{ => legacy}/trainer/TrainerConfigHelper.cpp (100%) 
rename paddle/{ => legacy}/trainer/TrainerConfigHelper.h (100%) rename paddle/{ => legacy}/trainer/TrainerInternal.cpp (100%) rename paddle/{ => legacy}/trainer/TrainerInternal.h (100%) rename paddle/{ => legacy}/trainer/TrainerInternalConfig.cpp (100%) rename paddle/{ => legacy}/trainer/TrainerInternalConfig.h (100%) rename paddle/{ => legacy}/trainer/TrainerMain.cpp (100%) rename paddle/{ => legacy}/trainer/tests/.gitignore (100%) rename paddle/{ => legacy}/trainer/tests/CMakeLists.txt (89%) rename paddle/{ => legacy}/trainer/tests/__init__.py (100%) rename paddle/{ => legacy}/trainer/tests/config_parser_test.py (87%) rename paddle/{ => legacy}/trainer/tests/fake_file_list.list (100%) rename paddle/{ => legacy}/trainer/tests/picojson.h (100%) rename paddle/{ => legacy}/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data (100%) create mode 100644 paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list rename paddle/{ => legacy}/trainer/tests/rnn_gen_test_model_dir/r1.test.beam (100%) rename paddle/{ => legacy}/trainer/tests/rnn_gen_test_model_dir/r1.test.nest (100%) rename paddle/{ => legacy}/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam (100%) rename paddle/{ => legacy}/trainer/tests/rnn_gen_test_model_dir/t1/transtable (100%) rename paddle/{ => legacy}/trainer/tests/rnn_gen_test_model_dir/t1/wordvec (100%) rename paddle/{ => legacy}/trainer/tests/sample_data.txt (100%) create mode 100644 paddle/legacy/trainer/tests/sample_filelist.txt rename paddle/{ => legacy}/trainer/tests/sample_trainer_config.conf (95%) rename paddle/{ => legacy}/trainer/tests/sample_trainer_config_hsigmoid.conf (96%) rename paddle/{ => legacy}/trainer/tests/sample_trainer_config_parallel.conf (95%) rename paddle/{ => legacy}/trainer/tests/sample_trainer_nest_rnn_gen.conf (94%) rename paddle/{ => legacy}/trainer/tests/sample_trainer_rnn_gen.conf (94%) rename paddle/{ => legacy}/trainer/tests/simple_sparse_neural_network.py (95%) rename paddle/{ => legacy}/trainer/tests/simple_sparse_neural_network_dp.py (100%) rename paddle/{ => legacy}/trainer/tests/testPyDataWrapper.py (100%) rename paddle/{ => legacy}/trainer/tests/test_Compare.cpp (97%) rename paddle/{ => legacy}/trainer/tests/test_PyDataProviderWrapper.cpp (99%) rename paddle/{ => legacy}/trainer/tests/test_Trainer.cpp (91%) rename paddle/{ => legacy}/trainer/tests/test_TrainerOnePass.cpp (96%) rename paddle/{ => legacy}/trainer/tests/test_config.conf (97%) rename paddle/{ => legacy}/trainer/tests/test_gen_dict.txt (100%) rename paddle/{ => legacy}/trainer/tests/test_recurrent_machine_generation.cpp (90%) delete mode 100644 paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list delete mode 100644 paddle/trainer/tests/sample_filelist.txt diff --git a/doc/v2/design/cluster_train/large_model_dist_train.md b/doc/v2/design/cluster_train/large_model_dist_train.md index 0c4b5bc24c..edb0245ea0 100644 --- a/doc/v2/design/cluster_train/large_model_dist_train.md +++ b/doc/v2/design/cluster_train/large_model_dist_train.md @@ -52,7 +52,7 @@ In `trainer_internal.cpp:L93 trainOneBatch`: When doing actual network forward and backward, at the beginning of each batch, the trainer will try to download one row of data from pserver. 
-In `trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`: +In `legacy/trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`: ```c++ if (fullSize) { diff --git a/doc/v2/design/mkl/mkldnn.md b/doc/v2/design/mkl/mkldnn.md index bd5bcf6f67..4876de0045 100644 --- a/doc/v2/design/mkl/mkldnn.md +++ b/doc/v2/design/mkl/mkldnn.md @@ -18,20 +18,20 @@ Figure 1. PaddlePaddle on IA 具体的完成状态可以参见[这里](https://github.com/PaddlePaddle/Paddle/projects/21)。 ## Contents - -- [Overview](#overview) -- [Actions](#actions) - - [CMake](#cmake) - - [Matrix](#matrix) - - [Layers](#layers) - - [Activations](#activations) - - [Parameters](#parameters) - - [Gradients](#gradients) - - [Unit Tests](#unit-tests) - - [Python API](#python-api) - - [Benchmarking](#benchmarking) - - [Others](#others) -- [Design Concerns](#design-concerns) + +- [Overview](#overview) +- [Actions](#actions) + - [CMake](#cmake) + - [Matrix](#matrix) + - [Layers](#layers) + - [Activations](#activations) + - [Parameters](#parameters) + - [Gradients](#gradients) + - [Unit Tests](#unit-tests) + - [Python API](#python-api) + - [Benchmarking](#benchmarking) + - [Others](#others) +- [Design Concerns](#design-concerns) ## Overview @@ -218,20 +218,20 @@ if use_mkldnn 我们总结出一些特别需要注意的点: 1. 使用**deviceId_**。为了尽可能少的在父类Layer中添加变量或者函数, -我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 -2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 +我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 +2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 3. 创建`MKLDNNBase`,定义一些除了layer和memory相关的类和函数。 -包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 +包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 4. 如果MKL-DNN layer的后面接有cpu device,那么就会使`output_.value`与`extOutVal_`共享内存, 同时数据格式就是`NCHW`,这样下一个cpu device就能拿到正确的数据。 在有普通的CPU layer时, `extOutVal_`和`extOutGrad_`的格式始终是`NCHW`或者`NC`。 ## References 1. [MKL small library](https://github.com/01org/mkl-dnn#linking-your-application)是[Intel MKL](https://software.intel.com/en-us/mkl)的一个子集。 -主要包括了深度学习相关的数学原语与操作,一般由MKL-DNN在发布[新版本](https://github.com/01org/mkl-dnn/releases)时一起更新。 +主要包括了深度学习相关的数学原语与操作,一般由MKL-DNN在发布[新版本](https://github.com/01org/mkl-dnn/releases)时一起更新。 2. [MKL-DNN System Requirements](https://github.com/01org/mkl-dnn#system-requirements)。 目前在PaddlePaddle中,仅会在支持AVX2指令集及以上的机器才使用MKL-DNN。 3. [原来的方案](https://github.com/PaddlePaddle/Paddle/pull/3096)会引入**nextLayer**的信息。 -但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 +但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 4. MKL-DNN的高性能格式与PaddlePaddle原有的`NCHW`不同(PaddlePaddle中的cuDNN部分使用的也是`NCHW`,所以不存在这个问题)。 -所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 +所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 diff --git a/doc/v2/dev/new_layer_en.rst b/doc/v2/dev/new_layer_en.rst index 6a848a020d..ad72373880 100644 --- a/doc/v2/dev/new_layer_en.rst +++ b/doc/v2/dev/new_layer_en.rst @@ -339,7 +339,7 @@ If you are creating a new file for the test, such as :code:`paddle/legacy/gserve Implement Python Wrapper ======================== -Implementing Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in file :code:`python/paddle/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: +Implementing Python wrapper allows us to use the added layer in configuration files. 
All the Python wrappers are in file :code:`python/paddle/legacy/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: - Use :code:`@config_layer('fc')` at the decorator for all the Python wrapper class. :code:`fc` is the identifier of the layer. - Implements :code:`__init__` constructor function. diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 7a4bd9183a..e1f65e505e 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -10,7 +10,7 @@ if(NOT WITH_FLUID_ONLY) add_subdirectory(legacy/capi) else() add_subdirectory(legacy/pserver) - add_subdirectory(trainer) + add_subdirectory(legacy/trainer) add_subdirectory(scripts) if(WITH_C_API) diff --git a/paddle/legacy/api/ConfigParser.cpp b/paddle/legacy/api/ConfigParser.cpp index d362a1e7cf..016d6da4e2 100644 --- a/paddle/legacy/api/ConfigParser.cpp +++ b/paddle/legacy/api/ConfigParser.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" struct ParameterConfigPrivate { paddle::ParameterPtr parameter; diff --git a/paddle/legacy/api/PaddleAPIPrivate.h b/paddle/legacy/api/PaddleAPIPrivate.h index 2e1c504d2e..3ee192c31d 100644 --- a/paddle/legacy/api/PaddleAPIPrivate.h +++ b/paddle/legacy/api/PaddleAPIPrivate.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/legacy/gserver/evaluators/Evaluator.h" #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" #include "paddle/legacy/parameter/ParameterUpdaterBase.h" -#include "paddle/trainer/TrainerConfigHelper.h" +#include "paddle/legacy/trainer/TrainerConfigHelper.h" struct GradientMachinePrivate { std::shared_ptr machine; diff --git a/paddle/legacy/api/ParameterUpdater.cpp b/paddle/legacy/api/ParameterUpdater.cpp index 63c000c959..44af3f4635 100644 --- a/paddle/legacy/api/ParameterUpdater.cpp +++ b/paddle/legacy/api/ParameterUpdater.cpp @@ -16,10 +16,10 @@ limitations under the License. */ #include "PaddleAPIPrivate.h" #ifndef PADDLE_WITHOUT_GOLANG -#include "paddle/trainer/NewRemoteParameterUpdater.h" +#include "paddle/legacy/trainer/NewRemoteParameterUpdater.h" #endif -#include "paddle/trainer/RemoteParameterUpdater.h" -#include "paddle/trainer/ThreadParameterUpdater.h" +#include "paddle/legacy/trainer/RemoteParameterUpdater.h" +#include "paddle/legacy/trainer/ThreadParameterUpdater.h" ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {} diff --git a/paddle/legacy/api/Trainer.cpp b/paddle/legacy/api/Trainer.cpp index 6506acb738..8b39b962ee 100644 --- a/paddle/legacy/api/Trainer.cpp +++ b/paddle/legacy/api/Trainer.cpp @@ -20,9 +20,9 @@ limitations under the License. */ #include #include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/trainer/ParamUtil.h" -#include "paddle/trainer/Trainer.h" -#include "paddle/trainer/TrainerInternal.h" +#include "paddle/legacy/trainer/ParamUtil.h" +#include "paddle/legacy/trainer/Trainer.h" +#include "paddle/legacy/trainer/TrainerInternal.h" #include "paddle/utils/Flags.h" using paddle::real; diff --git a/paddle/legacy/capi/Main.cpp b/paddle/legacy/capi/Main.cpp index 0a289dede6..fd9275058a 100644 --- a/paddle/legacy/capi/Main.cpp +++ b/paddle/legacy/capi/Main.cpp @@ -18,7 +18,7 @@ limitations under the License. 
*/ #include #include "capi_private.h" #include "main.h" -#include "paddle/trainer/TrainerConfigHelper.h" +#include "paddle/legacy/trainer/TrainerConfigHelper.h" #include "paddle/utils/Excepts.h" #include "paddle/utils/PythonUtil.h" diff --git a/paddle/legacy/capi/tests/test_GradientMachine.cpp b/paddle/legacy/capi/tests/test_GradientMachine.cpp index 2c02669ccf..b86d2f2049 100644 --- a/paddle/legacy/capi/tests/test_GradientMachine.cpp +++ b/paddle/legacy/capi/tests/test_GradientMachine.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include #include -#include +#include #include #include #include diff --git a/paddle/legacy/gserver/tests/MKLDNNTester.cpp b/paddle/legacy/gserver/tests/MKLDNNTester.cpp index bed58f94bb..b550ba9c72 100644 --- a/paddle/legacy/gserver/tests/MKLDNNTester.cpp +++ b/paddle/legacy/gserver/tests/MKLDNNTester.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "MKLDNNTester.h" #include "paddle/legacy/gserver/layers/MKLDNNBase.h" #include "paddle/legacy/gserver/layers/MKLDNNLayer.h" -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" namespace paddle { diff --git a/paddle/legacy/gserver/tests/test_CompareSparse.cpp b/paddle/legacy/gserver/tests/test_CompareSparse.cpp index 51433c9aaa..26b23eac7c 100644 --- a/paddle/legacy/gserver/tests/test_CompareSparse.cpp +++ b/paddle/legacy/gserver/tests/test_CompareSparse.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" #include #include diff --git a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp index 3ac86ce516..6e8f855c6b 100644 --- a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp +++ b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include #include -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_Evaluator.cpp b/paddle/legacy/gserver/tests/test_Evaluator.cpp index 4a8843f3af..8aab50d23e 100644 --- a/paddle/legacy/gserver/tests/test_Evaluator.cpp +++ b/paddle/legacy/gserver/tests/test_Evaluator.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include "ModelConfig.pb.h" +#include "paddle/legacy/trainer/Trainer.h" #include "paddle/testing/TestUtil.h" -#include "paddle/trainer/Trainer.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp index 5a6b224583..e07922b58c 100644 --- a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp @@ -18,8 +18,8 @@ limitations under the License. */ #include #include +#include "paddle/legacy/trainer/Trainer.h" #include "paddle/testing/TestUtil.h" -#include "paddle/trainer/Trainer.h" #include "paddle/utils/Stat.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp index 9f9fee7ef6..279f2c2fbb 100644 --- a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp @@ -15,8 +15,8 @@ limitations under the License. 
*/ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/paddle/trainer/CMakeLists.txt b/paddle/legacy/trainer/CMakeLists.txt similarity index 100% rename from paddle/trainer/CMakeLists.txt rename to paddle/legacy/trainer/CMakeLists.txt diff --git a/paddle/trainer/MergeModel.cpp b/paddle/legacy/trainer/MergeModel.cpp similarity index 100% rename from paddle/trainer/MergeModel.cpp rename to paddle/legacy/trainer/MergeModel.cpp diff --git a/paddle/trainer/NewRemoteParameterUpdater.cpp b/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp similarity index 100% rename from paddle/trainer/NewRemoteParameterUpdater.cpp rename to paddle/legacy/trainer/NewRemoteParameterUpdater.cpp diff --git a/paddle/trainer/NewRemoteParameterUpdater.h b/paddle/legacy/trainer/NewRemoteParameterUpdater.h similarity index 100% rename from paddle/trainer/NewRemoteParameterUpdater.h rename to paddle/legacy/trainer/NewRemoteParameterUpdater.h diff --git a/paddle/trainer/ParamUtil.cpp b/paddle/legacy/trainer/ParamUtil.cpp similarity index 100% rename from paddle/trainer/ParamUtil.cpp rename to paddle/legacy/trainer/ParamUtil.cpp diff --git a/paddle/trainer/ParamUtil.h b/paddle/legacy/trainer/ParamUtil.h similarity index 100% rename from paddle/trainer/ParamUtil.h rename to paddle/legacy/trainer/ParamUtil.h diff --git a/paddle/trainer/ParameterUpdater.cpp b/paddle/legacy/trainer/ParameterUpdater.cpp similarity index 100% rename from paddle/trainer/ParameterUpdater.cpp rename to paddle/legacy/trainer/ParameterUpdater.cpp diff --git a/paddle/trainer/ParameterUpdater.h b/paddle/legacy/trainer/ParameterUpdater.h similarity index 100% rename from paddle/trainer/ParameterUpdater.h rename to paddle/legacy/trainer/ParameterUpdater.h diff --git a/paddle/trainer/RemoteParameterUpdater.cpp b/paddle/legacy/trainer/RemoteParameterUpdater.cpp similarity index 100% rename from paddle/trainer/RemoteParameterUpdater.cpp rename to paddle/legacy/trainer/RemoteParameterUpdater.cpp diff --git a/paddle/trainer/RemoteParameterUpdater.h b/paddle/legacy/trainer/RemoteParameterUpdater.h similarity index 100% rename from paddle/trainer/RemoteParameterUpdater.h rename to paddle/legacy/trainer/RemoteParameterUpdater.h diff --git a/paddle/trainer/Tester.cpp b/paddle/legacy/trainer/Tester.cpp similarity index 100% rename from paddle/trainer/Tester.cpp rename to paddle/legacy/trainer/Tester.cpp diff --git a/paddle/trainer/Tester.h b/paddle/legacy/trainer/Tester.h similarity index 100% rename from paddle/trainer/Tester.h rename to paddle/legacy/trainer/Tester.h diff --git a/paddle/trainer/TesterConfig.h b/paddle/legacy/trainer/TesterConfig.h similarity index 100% rename from paddle/trainer/TesterConfig.h rename to paddle/legacy/trainer/TesterConfig.h diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/legacy/trainer/ThreadParameterUpdater.cpp similarity index 100% rename from paddle/trainer/ThreadParameterUpdater.cpp rename to paddle/legacy/trainer/ThreadParameterUpdater.cpp diff --git a/paddle/trainer/ThreadParameterUpdater.h b/paddle/legacy/trainer/ThreadParameterUpdater.h similarity index 100% rename from paddle/trainer/ThreadParameterUpdater.h rename to paddle/legacy/trainer/ThreadParameterUpdater.h diff --git a/paddle/trainer/Trainer.cpp b/paddle/legacy/trainer/Trainer.cpp similarity index 100% rename from paddle/trainer/Trainer.cpp rename to paddle/legacy/trainer/Trainer.cpp diff --git a/paddle/trainer/Trainer.h b/paddle/legacy/trainer/Trainer.h similarity index 100% rename from 
paddle/trainer/Trainer.h rename to paddle/legacy/trainer/Trainer.h diff --git a/paddle/trainer/TrainerBenchmark.cpp b/paddle/legacy/trainer/TrainerBenchmark.cpp similarity index 100% rename from paddle/trainer/TrainerBenchmark.cpp rename to paddle/legacy/trainer/TrainerBenchmark.cpp diff --git a/paddle/trainer/TrainerConfigHelper.cpp b/paddle/legacy/trainer/TrainerConfigHelper.cpp similarity index 100% rename from paddle/trainer/TrainerConfigHelper.cpp rename to paddle/legacy/trainer/TrainerConfigHelper.cpp diff --git a/paddle/trainer/TrainerConfigHelper.h b/paddle/legacy/trainer/TrainerConfigHelper.h similarity index 100% rename from paddle/trainer/TrainerConfigHelper.h rename to paddle/legacy/trainer/TrainerConfigHelper.h diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/legacy/trainer/TrainerInternal.cpp similarity index 100% rename from paddle/trainer/TrainerInternal.cpp rename to paddle/legacy/trainer/TrainerInternal.cpp diff --git a/paddle/trainer/TrainerInternal.h b/paddle/legacy/trainer/TrainerInternal.h similarity index 100% rename from paddle/trainer/TrainerInternal.h rename to paddle/legacy/trainer/TrainerInternal.h diff --git a/paddle/trainer/TrainerInternalConfig.cpp b/paddle/legacy/trainer/TrainerInternalConfig.cpp similarity index 100% rename from paddle/trainer/TrainerInternalConfig.cpp rename to paddle/legacy/trainer/TrainerInternalConfig.cpp diff --git a/paddle/trainer/TrainerInternalConfig.h b/paddle/legacy/trainer/TrainerInternalConfig.h similarity index 100% rename from paddle/trainer/TrainerInternalConfig.h rename to paddle/legacy/trainer/TrainerInternalConfig.h diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/legacy/trainer/TrainerMain.cpp similarity index 100% rename from paddle/trainer/TrainerMain.cpp rename to paddle/legacy/trainer/TrainerMain.cpp diff --git a/paddle/trainer/tests/.gitignore b/paddle/legacy/trainer/tests/.gitignore similarity index 100% rename from paddle/trainer/tests/.gitignore rename to paddle/legacy/trainer/tests/.gitignore diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/legacy/trainer/tests/CMakeLists.txt similarity index 89% rename from paddle/trainer/tests/CMakeLists.txt rename to paddle/legacy/trainer/tests/CMakeLists.txt index 12c9ea8cef..08548bea4c 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/legacy/trainer/tests/CMakeLists.txt @@ -5,7 +5,7 @@ add_custom_target(copy_trainer_conf ALL DEPENDS sample_trainer_config.conf) set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/trainer/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/legacy/trainer/tests) function(trainer_test TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} @@ -33,5 +33,5 @@ endif() #################### test_config_parser ######################### add_test(NAME test_config_parser COMMAND ${PYTHON_PATH} ${PYTHON_EXECUTABLE} - ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/config_parser_test.py + ${PADDLE_SOURCE_DIR}/paddle/legacy/trainer/tests/config_parser_test.py WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) diff --git a/paddle/trainer/tests/__init__.py b/paddle/legacy/trainer/tests/__init__.py similarity index 100% rename from paddle/trainer/tests/__init__.py rename to paddle/legacy/trainer/tests/__init__.py diff --git a/paddle/trainer/tests/config_parser_test.py b/paddle/legacy/trainer/tests/config_parser_test.py similarity index 87% rename from paddle/trainer/tests/config_parser_test.py rename to 
paddle/legacy/trainer/tests/config_parser_test.py index 88646e11f7..0d3d82cbda 100644 --- a/paddle/trainer/tests/config_parser_test.py +++ b/paddle/legacy/trainer/tests/config_parser_test.py @@ -15,9 +15,9 @@ from paddle.trainer.config_parser import parse_config_and_serialize if __name__ == '__main__': - parse_config_and_serialize('trainer/tests/test_config.conf', '') + parse_config_and_serialize('legacy/trainer/tests/test_config.conf', '') parse_config_and_serialize( - 'trainer/tests/sample_trainer_config.conf', + 'legacy/trainer/tests/sample_trainer_config.conf', 'extension_module_name=paddle.trainer.config_parser_extension') parse_config_and_serialize( 'legacy/gserver/tests/pyDataProvider/trainer.conf', '') diff --git a/paddle/trainer/tests/fake_file_list.list b/paddle/legacy/trainer/tests/fake_file_list.list similarity index 100% rename from paddle/trainer/tests/fake_file_list.list rename to paddle/legacy/trainer/tests/fake_file_list.list diff --git a/paddle/trainer/tests/picojson.h b/paddle/legacy/trainer/tests/picojson.h similarity index 100% rename from paddle/trainer/tests/picojson.h rename to paddle/legacy/trainer/tests/picojson.h diff --git a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data similarity index 100% rename from paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data rename to paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data diff --git a/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list new file mode 100644 index 0000000000..11c1b1b38b --- /dev/null +++ b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list @@ -0,0 +1 @@ +legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.beam b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.beam similarity index 100% rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.beam rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.beam diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nest b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nest similarity index 100% rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nest rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nest diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam similarity index 100% rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/t1/transtable b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/transtable similarity index 100% rename from paddle/trainer/tests/rnn_gen_test_model_dir/t1/transtable rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/transtable diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/t1/wordvec b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/wordvec similarity index 100% rename from paddle/trainer/tests/rnn_gen_test_model_dir/t1/wordvec rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/wordvec diff --git 
a/paddle/trainer/tests/sample_data.txt b/paddle/legacy/trainer/tests/sample_data.txt similarity index 100% rename from paddle/trainer/tests/sample_data.txt rename to paddle/legacy/trainer/tests/sample_data.txt diff --git a/paddle/legacy/trainer/tests/sample_filelist.txt b/paddle/legacy/trainer/tests/sample_filelist.txt new file mode 100644 index 0000000000..8573f9e179 --- /dev/null +++ b/paddle/legacy/trainer/tests/sample_filelist.txt @@ -0,0 +1 @@ +legacy/trainer/tests/sample_data.txt diff --git a/paddle/trainer/tests/sample_trainer_config.conf b/paddle/legacy/trainer/tests/sample_trainer_config.conf similarity index 95% rename from paddle/trainer/tests/sample_trainer_config.conf rename to paddle/legacy/trainer/tests/sample_trainer_config.conf index 2697832840..5800b36256 100644 --- a/paddle/trainer/tests/sample_trainer_config.conf +++ b/paddle/legacy/trainer/tests/sample_trainer_config.conf @@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import * TrainData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000)) TestData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000)) diff --git a/paddle/trainer/tests/sample_trainer_config_hsigmoid.conf b/paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf similarity index 96% rename from paddle/trainer/tests/sample_trainer_config_hsigmoid.conf rename to paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf index e4abe31d48..155c40b31f 100644 --- a/paddle/trainer/tests/sample_trainer_config_hsigmoid.conf +++ b/paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf @@ -17,7 +17,7 @@ from paddle.trainer_config_helpers import * TrainData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000, diff --git a/paddle/trainer/tests/sample_trainer_config_parallel.conf b/paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf similarity index 95% rename from paddle/trainer/tests/sample_trainer_config_parallel.conf rename to paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf index e2b8b3ecda..49cdde7fa2 100644 --- a/paddle/trainer/tests/sample_trainer_config_parallel.conf +++ b/paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf @@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import * TrainData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000)) TestData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000)) diff --git a/paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf b/paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf similarity index 94% rename from paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf rename to paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf index 741a0aa71d..51ef905a5a 100644 --- a/paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf +++ b/paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf @@ -63,8 +63,8 @@ beam_gen_concat = recurrent_group(name="rnn_gen_concat", seqtext_printer_evaluator(input=beam_gen_concat, id_input=sent_id, - 
dict_file="./trainer/tests/test_gen_dict.txt", - result_file="./trainer/tests/dump_text.test") + dict_file="./legacy/trainer/tests/test_gen_dict.txt", + result_file="./legacy/trainer/tests/dump_text.test") #outputs(beam_gen_concat) # In this config, as dummy_data_input doesn't work on beam_gen (we can find dummy_memory # is read-only memory, and isn't used by other layers of step), we show the Inputs and Outputs diff --git a/paddle/trainer/tests/sample_trainer_rnn_gen.conf b/paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf similarity index 94% rename from paddle/trainer/tests/sample_trainer_rnn_gen.conf rename to paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf index 58d27f15ae..35c7f0fcd9 100644 --- a/paddle/trainer/tests/sample_trainer_rnn_gen.conf +++ b/paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf @@ -56,8 +56,8 @@ beam_gen = beam_search(name="rnn_gen", seqtext_printer_evaluator(input=beam_gen, id_input=sent_id, - dict_file="./trainer/tests/test_gen_dict.txt", - result_file="./trainer/tests/dump_text.test") + dict_file="./legacy/trainer/tests/test_gen_dict.txt", + result_file="./legacy/trainer/tests/dump_text.test") #outputs(beam_gen) # In this config, as dummy_data_input doesn't work on beam_gen (we can find dummy_memory # is read-only memory, and isn't used by other layers of step), we show the Inputs and Outputs diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/legacy/trainer/tests/simple_sparse_neural_network.py similarity index 95% rename from paddle/trainer/tests/simple_sparse_neural_network.py rename to paddle/legacy/trainer/tests/simple_sparse_neural_network.py index 970fb466dc..9419f4d903 100644 --- a/paddle/trainer/tests/simple_sparse_neural_network.py +++ b/paddle/legacy/trainer/tests/simple_sparse_neural_network.py @@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import * settings(batch_size=17, learning_method=AdaGradOptimizer(), learning_rate=1e-4) -file_list = 'trainer/tests/fake_file_list.list' +file_list = 'legacy/trainer/tests/fake_file_list.list' define_py_data_sources2( train_list=file_list, diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/legacy/trainer/tests/simple_sparse_neural_network_dp.py similarity index 100% rename from paddle/trainer/tests/simple_sparse_neural_network_dp.py rename to paddle/legacy/trainer/tests/simple_sparse_neural_network_dp.py diff --git a/paddle/trainer/tests/testPyDataWrapper.py b/paddle/legacy/trainer/tests/testPyDataWrapper.py similarity index 100% rename from paddle/trainer/tests/testPyDataWrapper.py rename to paddle/legacy/trainer/tests/testPyDataWrapper.py diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/legacy/trainer/tests/test_Compare.cpp similarity index 97% rename from paddle/trainer/tests/test_Compare.cpp rename to paddle/legacy/trainer/tests/test_Compare.cpp index f3a964acb6..496a148bf6 100644 --- a/paddle/trainer/tests/test_Compare.cpp +++ b/paddle/legacy/trainer/tests/test_Compare.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" #include #include @@ -22,7 +22,8 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -static const string& configFile = "trainer/tests/sample_trainer_config.conf"; +static const string& configFile = + "/legacy/trainer/tests/sample_trainer_config.conf"; DECLARE_int32(gpu_id); DECLARE_bool(use_gpu); diff --git a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp similarity index 99% rename from paddle/trainer/tests/test_PyDataProviderWrapper.cpp rename to paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp index e3cd1c904d..94eaba2e2f 100644 --- a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp +++ b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp @@ -26,7 +26,7 @@ limitations under the License. */ #include "picojson.h" void checkValue(std::vector& arguments, picojson::array& arr); -const std::string kDir = "./trainer/tests/pydata_provider_wrapper_dir/"; +const std::string kDir = "./legacy/trainer/tests/pydata_provider_wrapper_dir/"; TEST(PyDataProviderWrapper, SequenceData) { paddle::DataConfig conf; diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/legacy/trainer/tests/test_Trainer.cpp similarity index 91% rename from paddle/trainer/tests/test_Trainer.cpp rename to paddle/legacy/trainer/tests/test_Trainer.cpp index 394038cf73..9fb80762fe 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/legacy/trainer/tests/test_Trainer.cpp @@ -14,18 +14,19 @@ limitations under the License. */ #include #include -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" #include using namespace paddle; // NOLINT using namespace std; // NOLINT -static const string& configFile1 = "trainer/tests/sample_trainer_config.conf"; +static const string& configFile1 = + "legacy/trainer/tests/sample_trainer_config.conf"; static const string& configFile2 = - "trainer/tests/sample_trainer_config_hsigmoid.conf"; + "legacy/trainer/tests/sample_trainer_config_hsigmoid.conf"; static const string& configFile4 = - "trainer/tests/sample_trainer_config_parallel.conf"; + "legacy/trainer/tests/sample_trainer_config_parallel.conf"; DECLARE_bool(use_gpu); DECLARE_string(config); diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp similarity index 96% rename from paddle/trainer/tests/test_TrainerOnePass.cpp rename to paddle/legacy/trainer/tests/test_TrainerOnePass.cpp index 1e1b2d2bf4..0e25e35443 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include #include -#include "paddle/trainer/Trainer.h" -#include "paddle/trainer/TrainerInternal.h" +#include "paddle/legacy/trainer/Trainer.h" +#include "paddle/legacy/trainer/TrainerInternal.h" #include #include @@ -23,12 +23,13 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -static const string& configFile1 = "trainer/tests/sample_trainer_config.conf"; +static const string& configFile1 = + "legacy/trainer/tests/sample_trainer_config.conf"; static const string& configFile2 = - "trainer/tests/sample_trainer_config_parallel.conf"; + "legacy/trainer/tests/sample_trainer_config_parallel.conf"; static const string& configFileSimpleSparse = - "trainer/tests/simple_sparse_neural_network.py"; + "legacy/trainer/tests/simple_sparse_neural_network.py"; DECLARE_bool(use_gpu); DECLARE_string(config); diff --git a/paddle/trainer/tests/test_config.conf b/paddle/legacy/trainer/tests/test_config.conf similarity index 97% rename from paddle/trainer/tests/test_config.conf rename to paddle/legacy/trainer/tests/test_config.conf index 2f86aaa753..bce687ad83 100644 --- a/paddle/trainer/tests/test_config.conf +++ b/paddle/legacy/trainer/tests/test_config.conf @@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import * TrainData(SimpleData( - files = "trainer/tests/sample_filelist.txt", + files = "legacy/trainer/tests/sample_filelist.txt", feat_dim = 3, context_len = 0, buffer_capacity = 1000000, diff --git a/paddle/trainer/tests/test_gen_dict.txt b/paddle/legacy/trainer/tests/test_gen_dict.txt similarity index 100% rename from paddle/trainer/tests/test_gen_dict.txt rename to paddle/legacy/trainer/tests/test_gen_dict.txt diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp similarity index 90% rename from paddle/trainer/tests/test_recurrent_machine_generation.cpp rename to paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp index a8fbe31c2b..bd6ee0f01d 100644 --- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp +++ b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include -#include +#include #include #include @@ -22,13 +22,15 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -static const string& CONFIG_FILE = "trainer/tests/sample_trainer_rnn_gen.conf"; +static const string& CONFIG_FILE = + "legacy/trainer/tests/sample_trainer_rnn_gen.conf"; static const string& NEST_CONFIG_FILE = - "trainer/tests/sample_trainer_nest_rnn_gen.conf"; -static const string& OUTPUT_DIR = "trainer/tests/dump_text.test"; -static string modelDir = "trainer/tests/rnn_gen_test_model_dir/t1"; // NOLINT -static string expectFile = // NOLINT - "trainer/tests/rnn_gen_test_model_dir/r1.test"; // NOLINT + "legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf"; +static const string& OUTPUT_DIR = "legacy/trainer/tests/dump_text.test"; +static string modelDir = + "legacy/trainer/tests/rnn_gen_test_model_dir/t1"; // NOLINT +static string expectFile = // NOLINT + "legacy/trainer/tests/rnn_gen_test_model_dir/r1.test"; // NOLINT DECLARE_string(config_args); diff --git a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list b/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list deleted file mode 100644 index 0db50f34dd..0000000000 --- a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list +++ /dev/null @@ -1 +0,0 @@ -trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data diff --git a/paddle/trainer/tests/sample_filelist.txt b/paddle/trainer/tests/sample_filelist.txt deleted file mode 100644 index 7db4c73535..0000000000 --- a/paddle/trainer/tests/sample_filelist.txt +++ /dev/null @@ -1 +0,0 @@ -trainer/tests/sample_data.txt diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 460eb3b349..5b90facd49 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -67,7 +67,7 @@ extension_module_name=[MODULE_NAME], then config_parser will call MODULE_NAME.get_config_funcs(g_config) MODULE_NAME.get_config_funcs() should return a dictionary of name to functions, those functions will be available in the config file. 
-See trainer/tests/config_parser_test.py for example +See legacy/trainer/tests/config_parser_test.py for example To use this from paddle_trainer, paddle_trainer should be called with --config_args=extension_module_name=[MODULE_NAME] diff --git a/python/setup.py.in b/python/setup.py.in index 032784f4a2..d92abf6088 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -93,8 +93,8 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: paddle_bins = '' if '${WITH_FLUID_ONLY}'== 'OFF': paddle_bin_dir = 'opt/paddle/bin' - paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer', - '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model', + paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/legacy/trainer/paddle_trainer', + '${PADDLE_BINARY_DIR}/paddle/legacy/trainer/paddle_merge_model', '${PADDLE_BINARY_DIR}/paddle/legacy/pserver/paddle_pserver_main', '${PADDLE_BINARY_DIR}/paddle/scripts/paddle'] diff --git a/tools/codestyle/cpplint_pre_commit.hook b/tools/codestyle/cpplint_pre_commit.hook index 041ba868af..f4190fb876 100755 --- a/tools/codestyle/cpplint_pre_commit.hook +++ b/tools/codestyle/cpplint_pre_commit.hook @@ -4,7 +4,7 @@ TOTAL_ERRORS=0 # The trick to remove deleted files: https://stackoverflow.com/a/2413151 for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do - if [[ $file =~ ^(paddle/api/.*|paddle/capi/.*|paddle/contrib/.*|paddle/legacy/cuda/.*|paddle/legacy/function/.*|paddle/legacy/gserver/.*|paddle/legacy/math/.*|paddle/legacy/optimizer/.*|paddle/legacy/parameter/.*|paddle/legacy/pserver/.*|paddle/trainer/.*|paddle/utils/.*|paddle/testing/TestUtil.*) ]]; then + if [[ $file =~ ^(paddle/legacy/api/.*|paddle/legacy/capi/.*|paddle/contrib/.*|paddle/legacy/cuda/.*|paddle/legacy/function/.*|paddle/legacy/gserver/.*|paddle/legacy/math/.*|paddle/legacy/optimizer/.*|paddle/legacy/parameter/.*|paddle/legacy/pserver/.*|paddle/legacy/trainer/.*|paddle/utils/.*|paddle/testing/TestUtil.*) ]]; then continue; else cpplint --filter=-readability/fn_size $file; From 2df8e2931f47abef31f2a6f5a8a281946049b8ed Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 3 Jul 2018 13:22:18 +0800 Subject: [PATCH 27/34] fix --- paddle/legacy/trainer/tests/test_Compare.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/legacy/trainer/tests/test_Compare.cpp b/paddle/legacy/trainer/tests/test_Compare.cpp index 496a148bf6..9623c280eb 100644 --- a/paddle/legacy/trainer/tests/test_Compare.cpp +++ b/paddle/legacy/trainer/tests/test_Compare.cpp @@ -23,7 +23,7 @@ using namespace paddle; // NOLINT using namespace std; // NOLINT static const string& configFile = - "/legacy/trainer/tests/sample_trainer_config.conf"; + "./legacy/trainer/tests/sample_trainer_config.conf"; DECLARE_int32(gpu_id); DECLARE_bool(use_gpu); From 27d69625368a2e21aa7417908fbb4f539a4e3c91 Mon Sep 17 00:00:00 2001 From: Wu Yi Date: Tue, 3 Jul 2018 13:37:50 +0800 Subject: [PATCH 28/34] fix mac build (#11873) * fix mac build * add notes * fix_mac_build * update --- CMakeLists.txt | 1 + cmake/cblas.cmake | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 997672169f..23bb27e77b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,6 +65,7 @@ option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better d option(WITH_ANAKIN "Compile with Anakin library" OFF) option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE}) option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" 
OFF) +option(WITH_SYSTEM_BLAS "Use system blas library" OFF) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index e3b9d94215..6ed51c6484 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -83,18 +83,20 @@ else() set(REFERENCE_CBLAS_LIB_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/lib) endif() -find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS +if(WITH_SYSTEM_BLAS) + find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS}) -find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS + find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS ${REFERENCE_CBLAS_LIB_SEARCH_PATHS}) -if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) - set(CBLAS_FOUND ON) - set(CBLAS_PROVIDER REFERENCE) - set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) - set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY}) - add_definitions(-DPADDLE_USE_REFERENCE_CBLAS) - message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) + set(CBLAS_FOUND ON) + set(CBLAS_PROVIDER REFERENCE) + set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) + set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY}) + add_definitions(-DPADDLE_USE_REFERENCE_CBLAS) + message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + endif() endif() if(IOS_USE_VECLIB_FOR_BLAS AND VECLIB_FOUND) From 94cb59ad097851a7806f43c308f164437d0e15d6 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 3 Jul 2018 14:16:53 +0800 Subject: [PATCH 29/34] hide utils to legacy --- README.md | 2 ++ go/CMakeLists.txt | 2 +- go/cmd/master/master.go | 2 +- go/pserver/etcd_client.go | 2 +- paddle/CMakeLists.txt | 2 +- ...addle_inference_api_tensorrt_subgraph_engine.cc | 2 +- .../inference/tensorrt/convert/io_converter.h | 2 +- .../inference/tensorrt/convert/op_converter.h | 2 +- .../fluid/inference/tensorrt/convert/ut_helper.h | 2 +- paddle/fluid/inference/tensorrt/engine.h | 2 +- paddle/fluid/operators/positive_negative_pair_op.h | 2 +- paddle/fluid/operators/tensorrt_engine_op.cc | 2 +- paddle/fluid/platform/float16_test.cu | 2 +- paddle/legacy/api/Paddle.i | 2 +- paddle/legacy/api/PaddleAPI.h | 4 ++-- paddle/legacy/api/SequenceGenerator.cpp | 2 +- paddle/legacy/api/Trainer.cpp | 2 +- paddle/legacy/api/Util.cpp | 8 ++++---- paddle/legacy/capi/Main.cpp | 4 ++-- paddle/legacy/capi/tests/test_Arguments.cpp | 2 +- paddle/legacy/capi/tests/test_GradientMachine.cpp | 2 +- paddle/legacy/cuda/include/hl_base.h | 2 +- paddle/legacy/cuda/include/hl_gpu_gru.cuh | 2 +- paddle/legacy/cuda/include/hl_gpu_lstm.cuh | 2 +- .../legacy/cuda/include/hl_gpu_matrix_kernel.cuh | 2 +- paddle/legacy/cuda/src/hl_cuda_aggregate.cu | 2 +- paddle/legacy/cuda/src/hl_cuda_cublas.cc | 4 ++-- paddle/legacy/cuda/src/hl_cuda_cudnn.cc | 4 ++-- paddle/legacy/cuda/src/hl_cuda_device.cc | 4 ++-- paddle/legacy/cuda/src/hl_cuda_lstm.cu | 2 +- paddle/legacy/cuda/src/hl_cuda_matrix.cu | 2 +- paddle/legacy/cuda/src/hl_cuda_sequence.cu | 2 +- paddle/legacy/cuda/src/hl_cuda_sparse.cu | 2 +- paddle/legacy/cuda/src/hl_table_apply.cu | 2 +- paddle/legacy/cuda/src/hl_top_k.cu | 2 +- paddle/legacy/cuda/src/hl_warpctc_wrap.cc | 4 ++-- paddle/legacy/function/Function.h | 6 +++--- paddle/legacy/function/MulOp.cpp | 2 +- .../gserver/activations/ActivationFunction.cpp | 4 ++-- .../gserver/activations/ActivationFunction.h | 2 +- .../gserver/activations/MKLDNNActivation.cpp | 2 +- 
.../legacy/gserver/dataproviders/DataProvider.cpp | 8 ++++---- paddle/legacy/gserver/dataproviders/DataProvider.h | 14 +++++++------- .../gserver/dataproviders/MultiDataProvider.cpp | 4 ++-- .../gserver/dataproviders/PyDataProvider.cpp | 6 +++--- .../legacy/gserver/dataproviders/PyDataProvider.h | 2 +- .../gserver/dataproviders/PyDataProvider2.cpp | 6 +++--- .../gserver/evaluators/CTCErrorEvaluator.cpp | 2 +- .../legacy/gserver/evaluators/ChunkEvaluator.cpp | 2 +- paddle/legacy/gserver/evaluators/Evaluator.cpp | 4 ++-- paddle/legacy/gserver/evaluators/Evaluator.h | 4 ++-- .../gserver/gradientmachines/GradientMachine.cpp | 2 +- .../gserver/gradientmachines/GradientMachine.h | 2 +- .../gradientmachines/MultiGradientMachine.cpp | 4 ++-- .../gradientmachines/MultiGradientMachine.h | 4 ++-- .../gserver/gradientmachines/MultiNetwork.cpp | 4 ++-- .../legacy/gserver/gradientmachines/MultiNetwork.h | 2 +- .../gserver/gradientmachines/NeuralNetwork.cpp | 8 ++++---- .../gserver/gradientmachines/NeuralNetwork.h | 2 +- .../gradientmachines/ParallelNeuralNetwork.cpp | 4 ++-- .../gradientmachines/RecurrentGradientMachine.cpp | 6 +++--- .../gradientmachines/RecurrentGradientMachine.h | 2 +- paddle/legacy/gserver/layers/AddtoLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/AddtoLayer.h | 2 +- paddle/legacy/gserver/layers/AgentLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/AgentLayer.h | 2 +- paddle/legacy/gserver/layers/AverageLayer.cpp | 4 ++-- .../legacy/gserver/layers/BatchNormBaseLayer.cpp | 2 +- paddle/legacy/gserver/layers/BatchNormBaseLayer.h | 2 +- .../gserver/layers/BatchNormalizationLayer.cpp | 2 +- .../legacy/gserver/layers/BilinearInterpLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/BlockExpandLayer.cpp | 2 +- paddle/legacy/gserver/layers/ConcatenateLayer.cpp | 2 +- paddle/legacy/gserver/layers/ContextProjection.cpp | 2 +- paddle/legacy/gserver/layers/Conv3DLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/ConvBaseLayer.cpp | 2 +- .../legacy/gserver/layers/ConvBaseProjection.cpp | 2 +- paddle/legacy/gserver/layers/ConvProjection.cpp | 2 +- paddle/legacy/gserver/layers/ConvShiftLayer.cpp | 4 ++-- .../legacy/gserver/layers/ConvTransProjection.cpp | 2 +- .../gserver/layers/ConvexCombinationLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/CosSimLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/CosSimLayer.h | 2 +- paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/CostLayer.cpp | 2 +- paddle/legacy/gserver/layers/CropLayer.cpp | 2 +- .../legacy/gserver/layers/CudnnBatchNormLayer.cpp | 2 +- paddle/legacy/gserver/layers/CudnnBatchNormLayer.h | 2 +- .../legacy/gserver/layers/CudnnConvBaseLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/CudnnPoolLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/DataNormLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/DataNormLayer.h | 2 +- paddle/legacy/gserver/layers/DeConv3DLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/DotProdLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/EosIdCheckLayer.cpp | 2 +- paddle/legacy/gserver/layers/ExpandConvLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/ExpandLayer.cpp | 4 ++-- .../gserver/layers/FactorizationMachineLayer.cpp | 4 ++-- .../gserver/layers/FactorizationMachineLayer.h | 2 +- .../gserver/layers/FeatureMapExpandLayer.cpp | 2 +- .../legacy/gserver/layers/FullMatrixProjection.h | 2 +- .../legacy/gserver/layers/FullyConnectedLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/FullyConnectedLayer.h | 2 +- .../legacy/gserver/layers/GatedRecurrentLayer.cpp | 2 +- 
paddle/legacy/gserver/layers/GruCompute.cpp | 2 +- paddle/legacy/gserver/layers/GruCompute.h | 2 +- paddle/legacy/gserver/layers/GruStepLayer.cpp | 2 +- .../gserver/layers/HierarchicalSigmoidLayer.cpp | 2 +- .../legacy/gserver/layers/IdentityProjection.cpp | 2 +- .../legacy/gserver/layers/InterpolationLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/L2DistanceLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/Layer.cpp | 6 +++--- paddle/legacy/gserver/layers/Layer.h | 4 ++-- paddle/legacy/gserver/layers/LstmCompute.cpp | 2 +- paddle/legacy/gserver/layers/LstmCompute.h | 2 +- paddle/legacy/gserver/layers/LstmLayer.cpp | 2 +- paddle/legacy/gserver/layers/LstmStepLayer.cpp | 2 +- paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp | 2 +- paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp | 2 +- paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp | 2 +- paddle/legacy/gserver/layers/MKLDNNLayer.h | 2 +- paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp | 2 +- paddle/legacy/gserver/layers/MaxLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/MaxLayer.h | 2 +- .../legacy/gserver/layers/MaxPoolWithMaskLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/MixedLayer.cpp | 2 +- paddle/legacy/gserver/layers/MultinomialSampler.h | 2 +- paddle/legacy/gserver/layers/MultiplexLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/NormLayer.cpp | 2 +- .../legacy/gserver/layers/NormProjectionLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/OuterProdLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/PadLayer.cpp | 2 +- .../legacy/gserver/layers/ParameterReluLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/ParameterReluLayer.h | 2 +- paddle/legacy/gserver/layers/Pool3DLayer.cpp | 2 +- paddle/legacy/gserver/layers/PoolLayer.cpp | 2 +- .../legacy/gserver/layers/PoolProjectionLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/PowerLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/RecurrentLayer.h | 2 +- .../legacy/gserver/layers/RecurrentLayerGroup.cpp | 2 +- paddle/legacy/gserver/layers/RowConvLayer.cpp | 2 +- .../legacy/gserver/layers/ScaleSubRegionLayer.cpp | 2 +- paddle/legacy/gserver/layers/ScalingLayer.cpp | 4 ++-- .../layers/SelectiveFullyConnectedLayer.cpp | 4 ++-- .../gserver/layers/SelectiveFullyConnectedLayer.h | 2 +- .../legacy/gserver/layers/SequenceConcatLayer.cpp | 4 ++-- .../gserver/layers/SequenceLastInstanceLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/SequencePoolLayer.cpp | 2 +- .../legacy/gserver/layers/SequenceReshapeLayer.cpp | 4 ++-- .../legacy/gserver/layers/SequenceSliceLayer.cpp | 4 ++-- .../legacy/gserver/layers/SlopeInterceptLayer.cpp | 4 ++-- .../gserver/layers/SpatialPyramidPoolLayer.h | 2 +- .../gserver/layers/SubNestedSequenceLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/SubSequenceLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/SumToOneNormLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/SwitchOrderLayer.cpp | 2 +- paddle/legacy/gserver/layers/TensorLayer.cpp | 4 ++-- paddle/legacy/gserver/layers/TensorLayer.h | 2 +- paddle/legacy/gserver/layers/TransLayer.cpp | 2 +- .../layers/TransposedFullMatrixProjection.cpp | 2 +- paddle/legacy/gserver/layers/UpsampleLayer.h | 4 ++-- paddle/legacy/gserver/layers/ValidationLayer.cpp | 2 +- paddle/legacy/gserver/tests/test_BatchNorm.cpp | 2 +- paddle/legacy/gserver/tests/test_CompareSparse.cpp | 2 +- .../legacy/gserver/tests/test_CompareTwoNets.cpp | 2 +- paddle/legacy/gserver/tests/test_ConvTrans.cpp | 2 +- paddle/legacy/gserver/tests/test_ConvUnify.cpp | 2 +- paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp | 2 +- .../legacy/gserver/tests/test_LinearChainCRF.cpp | 2 +- 
paddle/legacy/gserver/tests/test_MKLDNN.cpp | 2 +- .../gserver/tests/test_MultinomialSampler.cpp | 4 ++-- .../legacy/gserver/tests/test_NetworkCompare.cpp | 4 ++-- .../legacy/gserver/tests/test_PyDataProvider.cpp | 2 +- .../legacy/gserver/tests/test_PyDataProvider2.cpp | 4 ++-- .../tests/test_RecurrentGradientMachine.cpp | 6 +++--- .../legacy/gserver/tests/test_RecurrentLayer.cpp | 2 +- .../legacy/gserver/tests/test_SelectiveFCLayer.cpp | 2 +- paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp | 2 +- paddle/legacy/math/Allocator.h | 2 +- paddle/legacy/math/BaseMatrix.cu | 2 +- paddle/legacy/math/BaseMatrix.h | 2 +- paddle/legacy/math/CpuSparseMatrix.cpp | 2 +- paddle/legacy/math/MathFunctions.cpp | 2 +- paddle/legacy/math/MathUtils.cpp | 2 +- paddle/legacy/math/Matrix.cpp | 4 ++-- paddle/legacy/math/Matrix.h | 8 ++++---- paddle/legacy/math/MatrixBitCode.cpp | 4 ++-- paddle/legacy/math/RowBuffer.h | 2 +- paddle/legacy/math/SparseMatrix.cpp | 2 +- paddle/legacy/math/SparseRowMatrix.cpp | 6 +++--- paddle/legacy/math/SparseRowMatrix.h | 2 +- paddle/legacy/math/Storage.cpp | 4 ++-- paddle/legacy/math/Storage.h | 2 +- paddle/legacy/math/TensorAssign.h | 2 +- paddle/legacy/math/TensorEvaluate.h | 2 +- paddle/legacy/math/TensorExpression.h | 4 ++-- paddle/legacy/math/TrainingAlgorithmOp.cu | 2 +- paddle/legacy/math/TrainingAlgorithmOp.h | 2 +- paddle/legacy/math/Vector.cpp | 10 +++++----- paddle/legacy/math/Vector.h | 4 ++-- paddle/legacy/math/tests/OriginalOptimizerApi.h | 2 +- paddle/legacy/math/tests/PerfUtils.h | 2 +- paddle/legacy/math/tests/test_Allocator.cpp | 4 ++-- paddle/legacy/math/tests/test_CpuGpuVector.cpp | 2 +- paddle/legacy/math/tests/test_ExecViaCpu.cpp | 4 ++-- paddle/legacy/math/tests/test_FPException.cpp | 2 +- paddle/legacy/math/tests/test_GpuProfiler.cpp | 4 ++-- paddle/legacy/math/tests/test_SIMDFunctions.cpp | 2 +- paddle/legacy/math/tests/test_SparseMatrix.cpp | 2 +- .../legacy/math/tests/test_TrainingAlgorithm.cpp | 2 +- paddle/legacy/math/tests/test_matrixCompare.cpp | 6 +++--- paddle/legacy/math/tests/test_matrixUtil.h | 2 +- .../legacy/math/tests/test_sparseMatrixCompare.cpp | 2 +- paddle/legacy/optimizer/serialization.h | 2 +- paddle/legacy/optimizer/tensor.h | 4 ++-- paddle/legacy/parameter/Argument.h | 4 ++-- paddle/legacy/parameter/FirstOrderOptimizer.cpp | 4 ++-- paddle/legacy/parameter/LearningRateScheduler.cpp | 2 +- paddle/legacy/parameter/LearningRateScheduler.h | 2 +- paddle/legacy/parameter/Parameter.cpp | 2 +- paddle/legacy/parameter/Parameter.h | 10 +++++----- paddle/legacy/parameter/ParameterOptimizer.cpp | 2 +- .../legacy/parameter/ParameterUpdateFunctions.cpp | 2 +- paddle/legacy/parameter/ParameterUpdateFunctions.h | 2 +- paddle/legacy/parameter/ParameterUpdaterBase.cpp | 2 +- paddle/legacy/parameter/ParameterUpdaterHook.cpp | 4 ++-- paddle/legacy/parameter/Regularizer.cpp | 4 ++-- paddle/legacy/parameter/Weight.cpp | 2 +- paddle/legacy/parameter/tests/test_common.cpp | 8 ++++---- paddle/legacy/pserver/BaseClient.cpp | 2 +- paddle/legacy/pserver/BaseClient.h | 4 ++-- paddle/legacy/pserver/LightNetwork.cpp | 4 ++-- paddle/legacy/pserver/LightNetwork.h | 2 +- paddle/legacy/pserver/ParameterClient2.cpp | 6 +++--- paddle/legacy/pserver/ParameterClient2.h | 10 +++++----- paddle/legacy/pserver/ParameterServer2.cpp | 8 ++++---- paddle/legacy/pserver/ParameterServer2.h | 8 ++++---- paddle/legacy/pserver/ParameterServerController.h | 2 +- paddle/legacy/pserver/RDMANetwork.h | 2 +- paddle/legacy/pserver/SocketChannel.cpp | 2 +- 
paddle/legacy/pserver/SocketChannel.h | 2 +- .../legacy/pserver/SparseParameterDistribution.cpp | 4 ++-- .../legacy/pserver/SparseParameterDistribution.h | 2 +- paddle/legacy/pserver/test/SocketTest.cpp | 4 ++-- .../legacy/pserver/test/test_ParameterServer2.cpp | 4 ++-- paddle/legacy/pserver/test/test_ProtoServer.cpp | 4 ++-- paddle/legacy/trainer/MergeModel.cpp | 2 +- .../legacy/trainer/NewRemoteParameterUpdater.cpp | 2 +- paddle/legacy/trainer/NewRemoteParameterUpdater.h | 4 ++-- paddle/legacy/trainer/ParamUtil.cpp | 10 +++++----- paddle/legacy/trainer/ParamUtil.h | 2 +- paddle/legacy/trainer/ParameterUpdater.cpp | 4 ++-- paddle/legacy/trainer/ParameterUpdater.h | 4 ++-- paddle/legacy/trainer/RemoteParameterUpdater.cpp | 4 ++-- paddle/legacy/trainer/RemoteParameterUpdater.h | 4 ++-- paddle/legacy/trainer/Tester.cpp | 8 ++++---- paddle/legacy/trainer/Tester.h | 2 +- paddle/legacy/trainer/TesterConfig.h | 2 +- paddle/legacy/trainer/ThreadParameterUpdater.cpp | 4 ++-- paddle/legacy/trainer/ThreadParameterUpdater.h | 2 +- paddle/legacy/trainer/Trainer.cpp | 10 +++++----- paddle/legacy/trainer/Trainer.h | 2 +- paddle/legacy/trainer/TrainerBenchmark.cpp | 4 ++-- paddle/legacy/trainer/TrainerConfigHelper.cpp | 4 ++-- paddle/legacy/trainer/TrainerConfigHelper.h | 4 ++-- paddle/legacy/trainer/TrainerInternal.cpp | 8 ++++---- paddle/legacy/trainer/TrainerInternal.h | 2 +- paddle/legacy/trainer/TrainerInternalConfig.h | 2 +- paddle/legacy/trainer/TrainerMain.cpp | 2 +- paddle/legacy/trainer/tests/test_Compare.cpp | 2 +- .../trainer/tests/test_PyDataProviderWrapper.cpp | 2 +- paddle/legacy/trainer/tests/test_Trainer.cpp | 4 ++-- .../legacy/trainer/tests/test_TrainerOnePass.cpp | 4 ++-- .../tests/test_recurrent_machine_generation.cpp | 2 +- paddle/{ => legacy}/utils/.gitignore | 0 paddle/{ => legacy}/utils/Any.h | 0 paddle/{ => legacy}/utils/CMakeLists.txt | 0 paddle/{ => legacy}/utils/ClassRegistrar.h | 0 paddle/{ => legacy}/utils/Common.h | 0 paddle/{ => legacy}/utils/CpuId.cpp | 4 ++-- paddle/{ => legacy}/utils/CpuId.h | 0 paddle/{ => legacy}/utils/CustomStackTrace.cpp | 0 paddle/{ => legacy}/utils/CustomStackTrace.h | 0 paddle/{ => legacy}/utils/DynamicLoader.cpp | 0 paddle/{ => legacy}/utils/DynamicLoader.h | 0 paddle/{ => legacy}/utils/Error.h | 0 paddle/{ => legacy}/utils/Excepts.h | 0 paddle/{ => legacy}/utils/Flags.cpp | 0 paddle/{ => legacy}/utils/Flags.h | 0 paddle/{ => legacy}/utils/GlobalConstants.cpp | 0 paddle/{ => legacy}/utils/GlobalConstants.h | 0 paddle/{ => legacy}/utils/Locks.h | 0 paddle/{ => legacy}/utils/Logging.cpp | 0 paddle/{ => legacy}/utils/Logging.h | 0 paddle/{ => legacy}/utils/PythonUtil.cpp | 0 paddle/{ => legacy}/utils/PythonUtil.h | 2 +- paddle/{ => legacy}/utils/Queue.h | 0 paddle/{ => legacy}/utils/Stat.cpp | 0 paddle/{ => legacy}/utils/Stat.h | 0 paddle/{ => legacy}/utils/StringUtil.cpp | 0 paddle/{ => legacy}/utils/StringUtil.h | 0 paddle/{ => legacy}/utils/Thread.h | 0 paddle/{ => legacy}/utils/ThreadLocal.cpp | 0 paddle/{ => legacy}/utils/ThreadLocal.h | 0 paddle/{ => legacy}/utils/Util.cpp | 0 paddle/{ => legacy}/utils/Util.h | 0 paddle/{ => legacy}/utils/Version.cpp | 0 paddle/{ => legacy}/utils/Version.h | 0 paddle/{ => legacy}/utils/arch/linux/Locks.cpp | 4 ++-- paddle/{ => legacy}/utils/arch/osx/Excepts.cpp | 2 +- paddle/{ => legacy}/utils/arch/osx/Locks.cpp | 4 ++-- paddle/{ => legacy}/utils/enable_virtualenv.py | 0 paddle/{ => legacy}/utils/tests/CMakeLists.txt | 2 +- .../utils/tests/test_CustomStackTrace.cpp | 8 ++++---- 
.../utils/tests/test_CustomStackTracePrint.cpp | 6 +++--- .../utils/tests/test_CustomStackTracePrint.sh | 0 paddle/{ => legacy}/utils/tests/test_Error.cpp | 2 +- paddle/{ => legacy}/utils/tests/test_SIMDFlags.cpp | 6 +++--- paddle/{ => legacy}/utils/tests/test_SpinLock.cpp | 6 +++--- .../{ => legacy}/utils/tests/test_StringUtils.cpp | 2 +- paddle/{ => legacy}/utils/tests/test_Thread.cpp | 2 +- .../utils/tests/test_ThreadBarrier.cpp | 6 +++--- paddle/testing/TestMain.cpp | 2 +- proto/README.md | 3 +++ python/CMakeLists.txt | 2 +- tools/codestyle/cpplint_pre_commit.hook | 2 +- 326 files changed, 468 insertions(+), 463 deletions(-) rename paddle/{ => legacy}/utils/.gitignore (100%) rename paddle/{ => legacy}/utils/Any.h (100%) rename paddle/{ => legacy}/utils/CMakeLists.txt (100%) rename paddle/{ => legacy}/utils/ClassRegistrar.h (100%) rename paddle/{ => legacy}/utils/Common.h (100%) rename paddle/{ => legacy}/utils/CpuId.cpp (96%) rename paddle/{ => legacy}/utils/CpuId.h (100%) rename paddle/{ => legacy}/utils/CustomStackTrace.cpp (100%) rename paddle/{ => legacy}/utils/CustomStackTrace.h (100%) rename paddle/{ => legacy}/utils/DynamicLoader.cpp (100%) rename paddle/{ => legacy}/utils/DynamicLoader.h (100%) rename paddle/{ => legacy}/utils/Error.h (100%) rename paddle/{ => legacy}/utils/Excepts.h (100%) rename paddle/{ => legacy}/utils/Flags.cpp (100%) rename paddle/{ => legacy}/utils/Flags.h (100%) rename paddle/{ => legacy}/utils/GlobalConstants.cpp (100%) rename paddle/{ => legacy}/utils/GlobalConstants.h (100%) rename paddle/{ => legacy}/utils/Locks.h (100%) rename paddle/{ => legacy}/utils/Logging.cpp (100%) rename paddle/{ => legacy}/utils/Logging.h (100%) rename paddle/{ => legacy}/utils/PythonUtil.cpp (100%) rename paddle/{ => legacy}/utils/PythonUtil.h (99%) rename paddle/{ => legacy}/utils/Queue.h (100%) rename paddle/{ => legacy}/utils/Stat.cpp (100%) rename paddle/{ => legacy}/utils/Stat.h (100%) rename paddle/{ => legacy}/utils/StringUtil.cpp (100%) rename paddle/{ => legacy}/utils/StringUtil.h (100%) rename paddle/{ => legacy}/utils/Thread.h (100%) rename paddle/{ => legacy}/utils/ThreadLocal.cpp (100%) rename paddle/{ => legacy}/utils/ThreadLocal.h (100%) rename paddle/{ => legacy}/utils/Util.cpp (100%) rename paddle/{ => legacy}/utils/Util.h (100%) rename paddle/{ => legacy}/utils/Version.cpp (100%) rename paddle/{ => legacy}/utils/Version.h (100%) rename paddle/{ => legacy}/utils/arch/linux/Locks.cpp (97%) rename paddle/{ => legacy}/utils/arch/osx/Excepts.cpp (97%) rename paddle/{ => legacy}/utils/arch/osx/Locks.cpp (97%) rename paddle/{ => legacy}/utils/enable_virtualenv.py (100%) rename paddle/{ => legacy}/utils/tests/CMakeLists.txt (84%) rename paddle/{ => legacy}/utils/tests/test_CustomStackTrace.cpp (94%) rename paddle/{ => legacy}/utils/tests/test_CustomStackTracePrint.cpp (86%) rename paddle/{ => legacy}/utils/tests/test_CustomStackTracePrint.sh (100%) rename paddle/{ => legacy}/utils/tests/test_Error.cpp (96%) rename paddle/{ => legacy}/utils/tests/test_SIMDFlags.cpp (94%) rename paddle/{ => legacy}/utils/tests/test_SpinLock.cpp (93%) rename paddle/{ => legacy}/utils/tests/test_StringUtils.cpp (95%) rename paddle/{ => legacy}/utils/tests/test_Thread.cpp (98%) rename paddle/{ => legacy}/utils/tests/test_ThreadBarrier.cpp (94%) create mode 100644 proto/README.md diff --git a/README.md b/README.md index 63abca069a..eb99ed21d0 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,8 @@ learning to many products at Baidu. 
Our vision is to enable deep learning for everyone via PaddlePaddle. Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle. +### Lastest PaddlePaddle Version: [Fluid](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid) + ## Features - **Flexibility** diff --git a/go/CMakeLists.txt b/go/CMakeLists.txt index f3a9296c2c..839b75a259 100644 --- a/go/CMakeLists.txt +++ b/go/CMakeLists.txt @@ -20,4 +20,4 @@ add_subdirectory(master/c) add_subdirectory(master) add_subdirectory(pserver) add_subdirectory(pserver/client) -add_subdirectory(utils/networkhelper) +add_subdirectory(legacy/utils/networkhelper) diff --git a/go/cmd/master/master.go b/go/cmd/master/master.go index 537df59c86..6c1e4c7198 100644 --- a/go/cmd/master/master.go +++ b/go/cmd/master/master.go @@ -28,8 +28,8 @@ import ( log "github.com/inconshreveable/log15" "github.com/namsral/flag" + "github.com/PaddlePaddle/Paddle/go/legacy/utils/networkhelper" "github.com/PaddlePaddle/Paddle/go/master" - "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" ) func main() { diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go index 719013b1bb..80b1abee5e 100644 --- a/go/pserver/etcd_client.go +++ b/go/pserver/etcd_client.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" + "github.com/PaddlePaddle/Paddle/go/legacy/utils/networkhelper" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" log "github.com/inconshreveable/log15" diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index e1f65e505e..6653244507 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,7 +1,7 @@ if(NOT WITH_FLUID_ONLY) add_subdirectory(legacy/cuda) add_subdirectory(legacy/function) - add_subdirectory(utils) + add_subdirectory(legacy/utils) add_subdirectory(legacy/math) add_subdirectory(legacy/gserver) add_subdirectory(legacy/parameter) diff --git a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc b/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc index a11396cee9..14554545d9 100644 --- a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc +++ b/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc @@ -15,7 +15,7 @@ #include "paddle/contrib/inference/paddle_inference_api.h" #include "paddle/contrib/inference/paddle_inference_api_impl.h" #include "paddle/fluid/inference/analysis/analyzer.h" -#include "paddle/fluid/inference/utils/singleton.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" namespace paddle { diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.h b/paddle/fluid/inference/tensorrt/convert/io_converter.h index 71c48e085d..fc8881f80c 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.h @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/inference/utils/singleton.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index 6697952051..bf4e07fed0 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/engine.h" -#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h index 3b1f531adc..0003b16d4e 100644 --- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h +++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h @@ -25,9 +25,9 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/inference/analysis/helper.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/engine.h" -#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index b06a9bbc67..42a596deb1 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -20,8 +20,8 @@ limitations under the License. */ #include #include #include "paddle/fluid/inference/engine.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/helper.h" -#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/operators/positive_negative_pair_op.h b/paddle/fluid/operators/positive_negative_pair_op.h index f20f33bbeb..db0a1002f4 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.h +++ b/paddle/fluid/operators/positive_negative_pair_op.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc index 647cfc0a0a..b5d0578831 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt_engine_op.cc @@ -18,9 +18,9 @@ #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/engine.h" -#include "paddle/fluid/inference/utils/singleton.h" #include "paddle/fluid/operators/tensorrt_engine_op.h" namespace paddle { diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index 577fc24ceb..1b9cf9b5d3 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #define ARITHMETIC_KERNEL(op_type, sign) \ __global__ void op_type(const half* in1, const half* in2, half* out) { \ diff --git a/paddle/legacy/api/Paddle.i b/paddle/legacy/api/Paddle.i index e6165fb106..7a1456a5c0 100644 --- a/paddle/legacy/api/Paddle.i +++ b/paddle/legacy/api/Paddle.i @@ -198,5 +198,5 @@ namespace std { %ignore ParameterConfigPrivate; %ignore OptimizationConfigPrivate; %ignore ParameterTraverseCallbackPrivate; -%include "utils/GlobalConstants.h" +%include "legacy/utils/GlobalConstants.h" %include "legacy/api/PaddleAPI.h" diff --git a/paddle/legacy/api/PaddleAPI.h b/paddle/legacy/api/PaddleAPI.h index ba3e815498..475984a3d5 100644 --- a/paddle/legacy/api/PaddleAPI.h +++ b/paddle/legacy/api/PaddleAPI.h @@ -20,8 +20,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" /// Import PaddlePaddle's enumeration into global namespace. using namespace paddle::enumeration_wrapper; // NOLINT diff --git a/paddle/legacy/api/SequenceGenerator.cpp b/paddle/legacy/api/SequenceGenerator.cpp index 96e075df50..2a73228f6d 100644 --- a/paddle/legacy/api/SequenceGenerator.cpp +++ b/paddle/legacy/api/SequenceGenerator.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include "PaddleAPI.h" #include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" #include "paddle/legacy/parameter/Argument.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/utils/Flags.h" // used to represent partial sequence struct Path { diff --git a/paddle/legacy/api/Trainer.cpp b/paddle/legacy/api/Trainer.cpp index 8b39b962ee..e7c607201b 100644 --- a/paddle/legacy/api/Trainer.cpp +++ b/paddle/legacy/api/Trainer.cpp @@ -23,7 +23,7 @@ limitations under the License. 
*/ #include "paddle/legacy/trainer/ParamUtil.h" #include "paddle/legacy/trainer/Trainer.h" #include "paddle/legacy/trainer/TrainerInternal.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/utils/Flags.h" using paddle::real; diff --git a/paddle/legacy/api/Util.cpp b/paddle/legacy/api/Util.cpp index d98daadbde..b458c4d90e 100644 --- a/paddle/legacy/api/Util.cpp +++ b/paddle/legacy/api/Util.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include "PaddleAPI.h" #include "paddle/legacy/parameter/Parameter.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Util.h" #include #include diff --git a/paddle/legacy/capi/Main.cpp b/paddle/legacy/capi/Main.cpp index fd9275058a..17d8f00a88 100644 --- a/paddle/legacy/capi/Main.cpp +++ b/paddle/legacy/capi/Main.cpp @@ -19,8 +19,8 @@ limitations under the License. */ #include "capi_private.h" #include "main.h" #include "paddle/legacy/trainer/TrainerConfigHelper.h" -#include "paddle/utils/Excepts.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/utils/Excepts.h" +#include "paddle/legacy/utils/PythonUtil.h" static void initPaddle(int argc, char** argv) { paddle::initMain(argc, argv); diff --git a/paddle/legacy/capi/tests/test_Arguments.cpp b/paddle/legacy/capi/tests/test_Arguments.cpp index bb08adf716..6fb379719d 100644 --- a/paddle/legacy/capi/tests/test_Arguments.cpp +++ b/paddle/legacy/capi/tests/test_Arguments.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "capi.h" #include "gtest/gtest.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); diff --git a/paddle/legacy/capi/tests/test_GradientMachine.cpp b/paddle/legacy/capi/tests/test_GradientMachine.cpp index b86d2f2049..5d1b7cb6ca 100644 --- a/paddle/legacy/capi/tests/test_GradientMachine.cpp +++ b/paddle/legacy/capi/tests/test_GradientMachine.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include #include #include "capi.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); diff --git a/paddle/legacy/cuda/include/hl_base.h b/paddle/legacy/cuda/include/hl_base.h index 8451d2546d..bfe812a438 100644 --- a/paddle/legacy/cuda/include/hl_base.h +++ b/paddle/legacy/cuda/include/hl_base.h @@ -208,7 +208,7 @@ typedef struct { #include #include "paddle/legacy/cuda/include/hl_cuda.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" extern __thread bool g_sync_flag; extern __thread cudaStream_t default_stream; diff --git a/paddle/legacy/cuda/include/hl_gpu_gru.cuh b/paddle/legacy/cuda/include/hl_gpu_gru.cuh index 9fcad2c3bc..8d299572c7 100644 --- a/paddle/legacy/cuda/include/hl_gpu_gru.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_gru.cuh @@ -18,7 +18,7 @@ limitations under the License. 
*/ #ifdef __NVCC__ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" /* * threads(framePerBlock, batchPerBlock) diff --git a/paddle/legacy/cuda/include/hl_gpu_lstm.cuh b/paddle/legacy/cuda/include/hl_gpu_lstm.cuh index 92517a44d2..aae011b838 100644 --- a/paddle/legacy/cuda/include/hl_gpu_lstm.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_lstm.cuh @@ -18,7 +18,7 @@ limitations under the License. */ #ifdef __NVCC__ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "hl_device_functions.cuh" /* diff --git a/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh b/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh index 0db023ce37..6177d23657 100644 --- a/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh @@ -18,7 +18,7 @@ limitations under the License. */ #define HL_GPU_MATRIX_KERNEL_CUH_ #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "hl_base.h" #ifdef __NVCC__ diff --git a/paddle/legacy/cuda/src/hl_cuda_aggregate.cu b/paddle/legacy/cuda/src/hl_cuda_aggregate.cu index d30c264127..9831c5ecc3 100644 --- a/paddle/legacy/cuda/src/hl_cuda_aggregate.cu +++ b/paddle/legacy/cuda/src/hl_cuda_aggregate.cu @@ -18,7 +18,7 @@ limitations under the License. */ #include "hl_cuda.ph" #include "hl_matrix_base.cuh" #include "hl_thread.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" /** * @brief matrix row operator. diff --git a/paddle/legacy/cuda/src/hl_cuda_cublas.cc b/paddle/legacy/cuda/src/hl_cuda_cublas.cc index 975df42878..283b8b6e9c 100644 --- a/paddle/legacy/cuda/src/hl_cuda_cublas.cc +++ b/paddle/legacy/cuda/src/hl_cuda_cublas.cc @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "hl_cuda.h" #include "hl_thread.ph" -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" namespace dynload { diff --git a/paddle/legacy/cuda/src/hl_cuda_cudnn.cc b/paddle/legacy/cuda/src/hl_cuda_cudnn.cc index dfa935dcff..b0ac5aaac2 100644 --- a/paddle/legacy/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/legacy/cuda/src/hl_cuda_cudnn.cc @@ -17,8 +17,8 @@ limitations under the License. */ #include #include "hl_cuda_cudnn.ph" #include "hl_thread.ph" -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_int32(cudnn_conv_workspace_limit_in_mb, 4096, diff --git a/paddle/legacy/cuda/src/hl_cuda_device.cc b/paddle/legacy/cuda/src/hl_cuda_device.cc index 3025aa4852..501e3b0f3b 100644 --- a/paddle/legacy/cuda/src/hl_cuda_device.cc +++ b/paddle/legacy/cuda/src/hl_cuda_device.cc @@ -23,8 +23,8 @@ limitations under the License. */ #include #include "hl_cuda.ph" #include "hl_thread.ph" -#include "paddle/utils/Logging.h" -#include "paddle/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" // clang-format on namespace dynload { diff --git a/paddle/legacy/cuda/src/hl_cuda_lstm.cu b/paddle/legacy/cuda/src/hl_cuda_lstm.cu index b8c4e433a1..9ac564fd25 100644 --- a/paddle/legacy/cuda/src/hl_cuda_lstm.cu +++ b/paddle/legacy/cuda/src/hl_cuda_lstm.cu @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "hl_base.h" #include "hl_cuda_cublas.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" typedef hppl::Active::forward t_forward; typedef hppl::Active::backward t_backward; diff --git a/paddle/legacy/cuda/src/hl_cuda_matrix.cu b/paddle/legacy/cuda/src/hl_cuda_matrix.cu index 3e17c8090c..6fe460026b 100644 --- a/paddle/legacy/cuda/src/hl_cuda_matrix.cu +++ b/paddle/legacy/cuda/src/hl_cuda_matrix.cu @@ -20,7 +20,7 @@ limitations under the License. */ #include "hl_matrix_ops.cuh" #include "hl_sequence.h" #include "hl_sparse.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_MATRIX_UNARY_OP(Zero, a = 0); DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1 * a + p2 * b); diff --git a/paddle/legacy/cuda/src/hl_cuda_sequence.cu b/paddle/legacy/cuda/src/hl_cuda_sequence.cu index a3a5f038de..1d772b5ce2 100644 --- a/paddle/legacy/cuda/src/hl_cuda_sequence.cu +++ b/paddle/legacy/cuda/src/hl_cuda_sequence.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "hl_base.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" __global__ void KeMaxSequenceForward(real* input, const int* sequence, diff --git a/paddle/legacy/cuda/src/hl_cuda_sparse.cu b/paddle/legacy/cuda/src/hl_cuda_sparse.cu index 432041fed5..8065a6f9f6 100644 --- a/paddle/legacy/cuda/src/hl_cuda_sparse.cu +++ b/paddle/legacy/cuda/src/hl_cuda_sparse.cu @@ -18,7 +18,7 @@ limitations under the License. */ #include "hl_matrix_ops.cuh" #include "hl_sparse.h" #include "hl_sparse.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_MATRIX_UNARY_PARAMETER_OP(mul_scalar, ONE_PARAMETER, a = a * p); DEFINE_MATRIX_UNARY_OP(Zero, a = 0); diff --git a/paddle/legacy/cuda/src/hl_table_apply.cu b/paddle/legacy/cuda/src/hl_table_apply.cu index efa4bef02b..7411ae35d3 100644 --- a/paddle/legacy/cuda/src/hl_table_apply.cu +++ b/paddle/legacy/cuda/src/hl_table_apply.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "hl_base.h" #include "hl_cuda.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" template __global__ void KeMatrixAddRows(real* output, diff --git a/paddle/legacy/cuda/src/hl_top_k.cu b/paddle/legacy/cuda/src/hl_top_k.cu index 14b9a7f50f..041ac419f5 100644 --- a/paddle/legacy/cuda/src/hl_top_k.cu +++ b/paddle/legacy/cuda/src/hl_top_k.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/legacy/cuda/include/hl_base.h" #include "paddle/legacy/cuda/include/hl_sparse.ph" #include "paddle/legacy/cuda/include/hl_top_k.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" // using namespace hppl; diff --git a/paddle/legacy/cuda/src/hl_warpctc_wrap.cc b/paddle/legacy/cuda/src/hl_warpctc_wrap.cc index 5111bceaff..31a8652f1f 100644 --- a/paddle/legacy/cuda/src/hl_warpctc_wrap.cc +++ b/paddle/legacy/cuda/src/hl_warpctc_wrap.cc @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "hl_warpctc_wrap.h" #include -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" namespace dynload { diff --git a/paddle/legacy/function/Function.h b/paddle/legacy/function/Function.h index cc6f999a0e..bc5ef7e6f2 100644 --- a/paddle/legacy/function/Function.h +++ b/paddle/legacy/function/Function.h @@ -18,9 +18,9 @@ limitations under the License. */ #include #include "BufferArg.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Any.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/Any.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Error.h" namespace paddle { diff --git a/paddle/legacy/function/MulOp.cpp b/paddle/legacy/function/MulOp.cpp index 1401031752..750978fc90 100644 --- a/paddle/legacy/function/MulOp.cpp +++ b/paddle/legacy/function/MulOp.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "MulOp.h" #include "GemmFunctor.h" #include "paddle/legacy/math/SIMDFunctions.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace { inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) { diff --git a/paddle/legacy/gserver/activations/ActivationFunction.cpp b/paddle/legacy/gserver/activations/ActivationFunction.cpp index 69f34db5ac..ae07c7e6d7 100644 --- a/paddle/legacy/gserver/activations/ActivationFunction.cpp +++ b/paddle/legacy/gserver/activations/ActivationFunction.cpp @@ -21,8 +21,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/parameter/Argument.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Logging.h" #ifdef PADDLE_WITH_MKLDNN #include "MKLDNNActivation.h" diff --git a/paddle/legacy/gserver/activations/ActivationFunction.h b/paddle/legacy/gserver/activations/ActivationFunction.h index 8e2e144769..8bc5b0f529 100644 --- a/paddle/legacy/gserver/activations/ActivationFunction.h +++ b/paddle/legacy/gserver/activations/ActivationFunction.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/Error.h" namespace paddle { diff --git a/paddle/legacy/gserver/activations/MKLDNNActivation.cpp b/paddle/legacy/gserver/activations/MKLDNNActivation.cpp index 672444c656..2eed7af70a 100644 --- a/paddle/legacy/gserver/activations/MKLDNNActivation.cpp +++ b/paddle/legacy/gserver/activations/MKLDNNActivation.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "MKLDNNActivation.h" #include "mkldnn.hpp" -#include "paddle/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/ClassRegistrar.h" namespace paddle { diff --git a/paddle/legacy/gserver/dataproviders/DataProvider.cpp b/paddle/legacy/gserver/dataproviders/DataProvider.cpp index 580cf821c6..b67af8a326 100644 --- a/paddle/legacy/gserver/dataproviders/DataProvider.cpp +++ b/paddle/legacy/gserver/dataproviders/DataProvider.cpp @@ -16,10 +16,10 @@ limitations under the License. 
*/ #include #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/dataproviders/DataProvider.h b/paddle/legacy/gserver/dataproviders/DataProvider.h index b6f74afed0..c2e1c5fdd6 100644 --- a/paddle/legacy/gserver/dataproviders/DataProvider.h +++ b/paddle/legacy/gserver/dataproviders/DataProvider.h @@ -29,13 +29,13 @@ limitations under the License. */ #include "paddle/legacy/math/SparseMatrix.h" #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/parameter/Argument.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp b/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp index f71947ef39..e5fc6d8a88 100644 --- a/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp +++ b/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "MultiDataProvider.h" #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp b/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp index dadf1b4cf2..0827bd39d4 100644 --- a/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp +++ b/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PyDataProvider.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/dataproviders/PyDataProvider.h b/paddle/legacy/gserver/dataproviders/PyDataProvider.h index da50dd4e2e..4b8bea04a1 100644 --- a/paddle/legacy/gserver/dataproviders/PyDataProvider.h +++ b/paddle/legacy/gserver/dataproviders/PyDataProvider.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include +#include #include "DataFormat.pb.h" #include "DataProvider.h" diff --git a/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp b/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp index 54ee091e8f..8e931e4061 100644 --- a/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp +++ b/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp @@ -25,9 +25,9 @@ limitations under the License. 
*/ #include "DataProvider.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp b/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp index 04335dc7cd..c145adda5e 100644 --- a/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp +++ b/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Evaluator.h" #include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" namespace paddle { diff --git a/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp b/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp index ea5c609a63..0ff3f2fa8c 100644 --- a/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp +++ b/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" #include "Evaluator.h" diff --git a/paddle/legacy/gserver/evaluators/Evaluator.cpp b/paddle/legacy/gserver/evaluators/Evaluator.cpp index 436c33e43b..a956f40d02 100644 --- a/paddle/legacy/gserver/evaluators/Evaluator.cpp +++ b/paddle/legacy/gserver/evaluators/Evaluator.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "paddle/legacy/gserver/evaluators/Evaluator.h" #include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" DECLARE_int32(trainer_id); diff --git a/paddle/legacy/gserver/evaluators/Evaluator.h b/paddle/legacy/gserver/evaluators/Evaluator.h index 90989bb0b6..b3462819b1 100644 --- a/paddle/legacy/gserver/evaluators/Evaluator.h +++ b/paddle/legacy/gserver/evaluators/Evaluator.h @@ -18,8 +18,8 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/legacy/parameter/Argument.h" #include "paddle/legacy/pserver/ParameterClient2.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Error.h" namespace paddle { diff --git a/paddle/legacy/gserver/gradientmachines/GradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/GradientMachine.cpp index 654024e8a4..1c4034d8bb 100644 --- a/paddle/legacy/gserver/gradientmachines/GradientMachine.cpp +++ b/paddle/legacy/gserver/gradientmachines/GradientMachine.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "GradientMachine.h" #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "NeuralNetwork.h" #include "hl_gpu.h" diff --git a/paddle/legacy/gserver/gradientmachines/GradientMachine.h b/paddle/legacy/gserver/gradientmachines/GradientMachine.h index 48f5141ce1..d4f754a9f4 100644 --- a/paddle/legacy/gserver/gradientmachines/GradientMachine.h +++ b/paddle/legacy/gserver/gradientmachines/GradientMachine.h @@ -24,7 +24,7 @@ limitations under the License. 
*/ #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/parameter/Parameter.h" #include "paddle/legacy/parameter/ParameterUpdaterBase.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" #ifndef PADDLE_MOBILE_INFERENCE #include "paddle/legacy/gserver/evaluators/Evaluator.h" diff --git a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp index b8d4d28f0f..637686e443 100644 --- a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "MultiGradientMachine.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" diff --git a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h index eff7d5284c..674acd4124 100644 --- a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h +++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "GradientMachine.h" #include "hl_gpu.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Queue.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Queue.h" namespace paddle { diff --git a/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp b/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp index 5f3d09dda2..1245c44103 100644 --- a/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp +++ b/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "MultiNetwork.h" diff --git a/paddle/legacy/gserver/gradientmachines/MultiNetwork.h b/paddle/legacy/gserver/gradientmachines/MultiNetwork.h index 495d559201..afe15cb020 100644 --- a/paddle/legacy/gserver/gradientmachines/MultiNetwork.h +++ b/paddle/legacy/gserver/gradientmachines/MultiNetwork.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "GradientMachine.h" #include "NeuralNetwork.h" -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" namespace paddle { diff --git a/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp index 339550c458..0f8048152f 100644 --- a/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp +++ b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp @@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "NeuralNetwork.h" #include "hl_gpu.h" -#include "paddle/utils/CustomStackTrace.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/CustomStackTrace.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/legacy/gserver/layers/MKLDNNLayer.h" diff --git a/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h index 5a0909b99b..566157c899 100644 --- a/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h +++ b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/legacy/gserver/layers/DataLayer.h" #include "paddle/legacy/gserver/layers/Layer.h" #include "paddle/legacy/parameter/Parameter.h" -#include "paddle/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/ClassRegistrar.h" namespace paddle { /* diff --git a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp index 85cfc59fbe..450514ca88 100644 --- a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp +++ b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "ParallelNeuralNetwork.h" diff --git a/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp index e749cf61f3..e49f042404 100644 --- a/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -20,9 +20,9 @@ limitations under the License. */ #include #include "NeuralNetwork.h" #include "paddle/legacy/gserver/layers/AgentLayer.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so"); diff --git a/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h index 7e943cebd3..0a13d4f6f8 100644 --- a/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h +++ b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h @@ -18,7 +18,7 @@ limitations under the License. */ #include "GradientMachine.h" #include "NeuralNetwork.h" -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/AddtoLayer.cpp b/paddle/legacy/gserver/layers/AddtoLayer.cpp index 75e17f52df..39c5603d93 100644 --- a/paddle/legacy/gserver/layers/AddtoLayer.cpp +++ b/paddle/legacy/gserver/layers/AddtoLayer.cpp @@ -14,9 +14,9 @@ limitations under the License. 
*/ #include "AddtoLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/AddtoLayer.h b/paddle/legacy/gserver/layers/AddtoLayer.h index 1f948de475..ad3cefe1a4 100644 --- a/paddle/legacy/gserver/layers/AddtoLayer.h +++ b/paddle/legacy/gserver/layers/AddtoLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/AgentLayer.cpp b/paddle/legacy/gserver/layers/AgentLayer.cpp index e2f73f88f5..bae89b2fa3 100644 --- a/paddle/legacy/gserver/layers/AgentLayer.cpp +++ b/paddle/legacy/gserver/layers/AgentLayer.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "AgentLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/AgentLayer.h b/paddle/legacy/gserver/layers/AgentLayer.h index f506db2f2d..a05eac5e70 100644 --- a/paddle/legacy/gserver/layers/AgentLayer.h +++ b/paddle/legacy/gserver/layers/AgentLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/AverageLayer.cpp b/paddle/legacy/gserver/layers/AverageLayer.cpp index b3787b1448..0539da7937 100644 --- a/paddle/legacy/gserver/layers/AverageLayer.cpp +++ b/paddle/legacy/gserver/layers/AverageLayer.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "AverageLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp index a3516f9423..4dcbd8dc27 100644 --- a/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "BatchNormBaseLayer.h" #include "BatchNormalizationLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #ifdef PADDLE_WITH_CUDA #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/legacy/gserver/layers/BatchNormBaseLayer.h b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h index 5a446c0843..8dc1d78837 100644 --- a/paddle/legacy/gserver/layers/BatchNormBaseLayer.h +++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp index 59831dd904..0297bd44c7 100644 --- a/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp +++ b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #ifdef PADDLE_WITH_CUDA #include "hl_batch_transpose.h" #endif diff --git a/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp b/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp index 9775914596..a091f51bc2 100644 --- a/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp +++ b/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "BilinearInterpLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/BlockExpandLayer.cpp b/paddle/legacy/gserver/layers/BlockExpandLayer.cpp index 793d24e884..24b5af67d4 100644 --- a/paddle/legacy/gserver/layers/BlockExpandLayer.cpp +++ b/paddle/legacy/gserver/layers/BlockExpandLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "BlockExpandLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConcatenateLayer.cpp b/paddle/legacy/gserver/layers/ConcatenateLayer.cpp index e6de329ff3..ce3f2ca950 100644 --- a/paddle/legacy/gserver/layers/ConcatenateLayer.cpp +++ b/paddle/legacy/gserver/layers/ConcatenateLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Layer.h" #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ContextProjection.cpp b/paddle/legacy/gserver/layers/ContextProjection.cpp index 10c3cef0da..8bcf32663e 100644 --- a/paddle/legacy/gserver/layers/ContextProjection.cpp +++ b/paddle/legacy/gserver/layers/ContextProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ContextProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/Conv3DLayer.cpp b/paddle/legacy/gserver/layers/Conv3DLayer.cpp index b38de86b15..d072a74234 100644 --- a/paddle/legacy/gserver/layers/Conv3DLayer.cpp +++ b/paddle/legacy/gserver/layers/Conv3DLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Conv3DLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConvBaseLayer.cpp b/paddle/legacy/gserver/layers/ConvBaseLayer.cpp index d8997527fb..76120915e4 100644 --- a/paddle/legacy/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/legacy/gserver/layers/ConvBaseLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "ConvBaseLayer.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { bool ConvBaseLayer::init(const LayerMap& layerMap, diff --git a/paddle/legacy/gserver/layers/ConvBaseProjection.cpp b/paddle/legacy/gserver/layers/ConvBaseProjection.cpp index 39f433b78f..ff5d3412de 100644 --- a/paddle/legacy/gserver/layers/ConvBaseProjection.cpp +++ b/paddle/legacy/gserver/layers/ConvBaseProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ConvBaseProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConvProjection.cpp b/paddle/legacy/gserver/layers/ConvProjection.cpp index f382e6cab1..b40cdac258 100644 --- a/paddle/legacy/gserver/layers/ConvProjection.cpp +++ b/paddle/legacy/gserver/layers/ConvProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ConvProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConvShiftLayer.cpp b/paddle/legacy/gserver/layers/ConvShiftLayer.cpp index dda1a91e45..b7ecbe556c 100644 --- a/paddle/legacy/gserver/layers/ConvShiftLayer.cpp +++ b/paddle/legacy/gserver/layers/ConvShiftLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConvTransProjection.cpp b/paddle/legacy/gserver/layers/ConvTransProjection.cpp index 242ce34a60..00e34c8f2d 100644 --- a/paddle/legacy/gserver/layers/ConvTransProjection.cpp +++ b/paddle/legacy/gserver/layers/ConvTransProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ConvTransProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp b/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp index 29a71fc1d9..c38ab251f1 100644 --- a/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp +++ b/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/CosSimLayer.cpp b/paddle/legacy/gserver/layers/CosSimLayer.cpp index 4e44a5e8df..ab8d7cc1f6 100644 --- a/paddle/legacy/gserver/layers/CosSimLayer.cpp +++ b/paddle/legacy/gserver/layers/CosSimLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "CosSimLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/CosSimLayer.h b/paddle/legacy/gserver/layers/CosSimLayer.h index 2e53de414d..b08e2c6a35 100644 --- a/paddle/legacy/gserver/layers/CosSimLayer.h +++ b/paddle/legacy/gserver/layers/CosSimLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp b/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp index da3ddf11dc..03de0be815 100644 --- a/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp +++ b/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/CostLayer.cpp b/paddle/legacy/gserver/layers/CostLayer.cpp index 2c0762be25..18b5b77bde 100644 --- a/paddle/legacy/gserver/layers/CostLayer.cpp +++ b/paddle/legacy/gserver/layers/CostLayer.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "paddle/legacy/math/SparseMatrix.h" diff --git a/paddle/legacy/gserver/layers/CropLayer.cpp b/paddle/legacy/gserver/layers/CropLayer.cpp index bc97ca2f9e..d891375ecc 100644 --- a/paddle/legacy/gserver/layers/CropLayer.cpp +++ b/paddle/legacy/gserver/layers/CropLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "CropLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { REGISTER_LAYER(crop, CropLayer); diff --git a/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp index 3f4e17c018..051155e0d2 100644 --- a/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "CudnnBatchNormLayer.h" #include "Layer.h" #include "paddle/legacy/cuda/include/hl_batch_norm.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h index 1bb4eff8d2..3b33b983b3 100644 --- a/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h +++ b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "BatchNormBaseLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp index 6d0a40a607..9353cca9c8 100644 --- a/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp +++ b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "CudnnConvBaseLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { REGISTER_LAYER(cudnn_conv, CudnnConvBaseLayer); diff --git a/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp b/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp index 9739ed9da4..c790dfd71e 100644 --- a/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp +++ b/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "CudnnPoolLayer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/DataNormLayer.cpp b/paddle/legacy/gserver/layers/DataNormLayer.cpp index 86da4d6f95..6820dfa4d4 100644 --- a/paddle/legacy/gserver/layers/DataNormLayer.cpp +++ b/paddle/legacy/gserver/layers/DataNormLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "DataNormLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/DataNormLayer.h b/paddle/legacy/gserver/layers/DataNormLayer.h index 556d7f4d66..7bb8e92824 100644 --- a/paddle/legacy/gserver/layers/DataNormLayer.h +++ b/paddle/legacy/gserver/layers/DataNormLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/DeConv3DLayer.cpp b/paddle/legacy/gserver/layers/DeConv3DLayer.cpp index db6d6e073c..2cd635564c 100644 --- a/paddle/legacy/gserver/layers/DeConv3DLayer.cpp +++ b/paddle/legacy/gserver/layers/DeConv3DLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "DeConv3DLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/DotProdLayer.cpp b/paddle/legacy/gserver/layers/DotProdLayer.cpp index 445361b101..06060d93f7 100644 --- a/paddle/legacy/gserver/layers/DotProdLayer.cpp +++ b/paddle/legacy/gserver/layers/DotProdLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp b/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp index 04400f2836..38671126c6 100644 --- a/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp +++ b/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/ExpandConvLayer.cpp b/paddle/legacy/gserver/layers/ExpandConvLayer.cpp index 3a84786582..8a53db3806 100644 --- a/paddle/legacy/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/legacy/gserver/layers/ExpandConvLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ExpandConvLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" DEFINE_bool(use_nnpack, false, diff --git a/paddle/legacy/gserver/layers/ExpandLayer.cpp b/paddle/legacy/gserver/layers/ExpandLayer.cpp index 6b57767540..074fbab8ef 100644 --- a/paddle/legacy/gserver/layers/ExpandLayer.cpp +++ b/paddle/legacy/gserver/layers/ExpandLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ExpandLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp b/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp index ddd202e1c6..6cf269fa3f 100644 --- a/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp +++ b/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/FactorizationMachineLayer.h b/paddle/legacy/gserver/layers/FactorizationMachineLayer.h index 1070ebd097..fc015ed727 100644 --- a/paddle/legacy/gserver/layers/FactorizationMachineLayer.h +++ b/paddle/legacy/gserver/layers/FactorizationMachineLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp b/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp index 417756a286..a3fe1433e4 100644 --- a/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp +++ b/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/FullMatrixProjection.h b/paddle/legacy/gserver/layers/FullMatrixProjection.h index a27aa4a123..c33d02a3ae 100644 --- a/paddle/legacy/gserver/layers/FullMatrixProjection.h +++ b/paddle/legacy/gserver/layers/FullMatrixProjection.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #include "Projection.h" diff --git a/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp b/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp index 0ffb4876f8..07f4dfbe39 100644 --- a/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp +++ b/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/FullyConnectedLayer.h b/paddle/legacy/gserver/layers/FullyConnectedLayer.h index a8a1c54e55..7e29cac043 100644 --- a/paddle/legacy/gserver/layers/FullyConnectedLayer.h +++ b/paddle/legacy/gserver/layers/FullyConnectedLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp b/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp index 9d38849fdf..bdcd445cb4 100644 --- a/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp +++ b/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "GatedRecurrentLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/GruCompute.cpp b/paddle/legacy/gserver/layers/GruCompute.cpp index d50c959e43..adad6285b7 100644 --- a/paddle/legacy/gserver/layers/GruCompute.cpp +++ b/paddle/legacy/gserver/layers/GruCompute.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "GruCompute.h" #include "hl_recurrent_apply.cuh" #include "paddle/legacy/function/GruFunctor.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/GruCompute.h b/paddle/legacy/gserver/layers/GruCompute.h index 50006325ce..6feea7aca8 100644 --- a/paddle/legacy/gserver/layers/GruCompute.h +++ b/paddle/legacy/gserver/layers/GruCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/GruStepLayer.cpp b/paddle/legacy/gserver/layers/GruStepLayer.cpp index 114f287411..2480e42d68 100644 --- a/paddle/legacy/gserver/layers/GruStepLayer.cpp +++ b/paddle/legacy/gserver/layers/GruStepLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "GruCompute.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp index 3e720f179e..3449599409 100644 --- a/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp +++ b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "HierarchicalSigmoidLayer.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/IdentityProjection.cpp b/paddle/legacy/gserver/layers/IdentityProjection.cpp index 34e9eb9016..f707642e09 100644 --- a/paddle/legacy/gserver/layers/IdentityProjection.cpp +++ b/paddle/legacy/gserver/layers/IdentityProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/InterpolationLayer.cpp b/paddle/legacy/gserver/layers/InterpolationLayer.cpp index aabfdc55ba..ed2294e8a3 100644 --- a/paddle/legacy/gserver/layers/InterpolationLayer.cpp +++ b/paddle/legacy/gserver/layers/InterpolationLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/L2DistanceLayer.cpp b/paddle/legacy/gserver/layers/L2DistanceLayer.cpp index c8cca3762c..a3e627e570 100644 --- a/paddle/legacy/gserver/layers/L2DistanceLayer.cpp +++ b/paddle/legacy/gserver/layers/L2DistanceLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "L2DistanceLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/Layer.cpp b/paddle/legacy/gserver/layers/Layer.cpp index f580b8e697..890d33552d 100644 --- a/paddle/legacy/gserver/layers/Layer.cpp +++ b/paddle/legacy/gserver/layers/Layer.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "CostLayer.h" #include "paddle/legacy/math/SparseMatrix.h" -#include "paddle/utils/Error.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Error.h" +#include "paddle/legacy/utils/Logging.h" #ifndef PADDLE_MOBILE_INFERENCE #include "ValidationLayer.h" diff --git a/paddle/legacy/gserver/layers/Layer.h b/paddle/legacy/gserver/layers/Layer.h index 65ec3bd03f..a7ff76dece 100644 --- a/paddle/legacy/gserver/layers/Layer.h +++ b/paddle/legacy/gserver/layers/Layer.h @@ -23,8 +23,8 @@ limitations under the License. */ #include "paddle/legacy/parameter/Argument.h" #include "paddle/legacy/parameter/Parameter.h" #include "paddle/legacy/parameter/Weight.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Util.h" /// Macro for registering a layer type. /// Example: REGISTER_LAYER(crf_error, CRFDecodingErrorLayer); diff --git a/paddle/legacy/gserver/layers/LstmCompute.cpp b/paddle/legacy/gserver/layers/LstmCompute.cpp index ea30f6d6b1..70f08e1d4e 100644 --- a/paddle/legacy/gserver/layers/LstmCompute.cpp +++ b/paddle/legacy/gserver/layers/LstmCompute.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "LstmCompute.h" #include "hl_recurrent_apply.cuh" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/LstmCompute.h b/paddle/legacy/gserver/layers/LstmCompute.h index 80fb01cd18..ac40c35ef1 100644 --- a/paddle/legacy/gserver/layers/LstmCompute.h +++ b/paddle/legacy/gserver/layers/LstmCompute.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/LstmLayer.cpp b/paddle/legacy/gserver/layers/LstmLayer.cpp index bb40ec0585..43a55d8d49 100644 --- a/paddle/legacy/gserver/layers/LstmLayer.cpp +++ b/paddle/legacy/gserver/layers/LstmLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "LstmLayer.h" #include "paddle/legacy/math/BaseMatrix.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_bool(prev_batch_state); diff --git a/paddle/legacy/gserver/layers/LstmStepLayer.cpp b/paddle/legacy/gserver/layers/LstmStepLayer.cpp index c44768ddb2..f02f8ad62f 100644 --- a/paddle/legacy/gserver/layers/LstmStepLayer.cpp +++ b/paddle/legacy/gserver/layers/LstmStepLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Layer.h" #include "LstmCompute.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp index 01c20d240b..b47bf14821 100644 --- a/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "MKLDNNConvLayer.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp index 0c7e6f16e2..f3747c7db8 100644 --- a/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNFcLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp index 88513ab8bc..739482348f 100644 --- a/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNLRNLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/legacy/gserver/layers/MKLDNNLayer.h b/paddle/legacy/gserver/layers/MKLDNNLayer.h index b8f292684c..94dc8625f6 100644 --- a/paddle/legacy/gserver/layers/MKLDNNLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNLayer.h @@ -19,7 +19,7 @@ limitations under the License. 
*/ #include "MKLDNNBase.h" #include "mkldnn.hpp" #include "paddle/legacy/math/MKLDNNMatrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_bool(use_mkldnn); diff --git a/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp index 99c419be88..83d980538d 100644 --- a/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "MKLDNNPoolLayer.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/legacy/gserver/layers/MaxLayer.cpp b/paddle/legacy/gserver/layers/MaxLayer.cpp index 7ee2e0dd94..b51251b663 100644 --- a/paddle/legacy/gserver/layers/MaxLayer.cpp +++ b/paddle/legacy/gserver/layers/MaxLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MaxLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MaxLayer.h b/paddle/legacy/gserver/layers/MaxLayer.h index 6b3491cde5..12d0128e39 100644 --- a/paddle/legacy/gserver/layers/MaxLayer.h +++ b/paddle/legacy/gserver/layers/MaxLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "SequencePoolLayer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp index e594e22b5e..a1cc59a719 100644 --- a/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp +++ b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MaxPoolWithMaskLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MixedLayer.cpp b/paddle/legacy/gserver/layers/MixedLayer.cpp index 7dcb30b98d..63e658c09c 100644 --- a/paddle/legacy/gserver/layers/MixedLayer.cpp +++ b/paddle/legacy/gserver/layers/MixedLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MixedLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MultinomialSampler.h b/paddle/legacy/gserver/layers/MultinomialSampler.h index 8cbb229f15..ed44535241 100644 --- a/paddle/legacy/gserver/layers/MultinomialSampler.h +++ b/paddle/legacy/gserver/layers/MultinomialSampler.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/MultiplexLayer.cpp b/paddle/legacy/gserver/layers/MultiplexLayer.cpp index 54a554a1a9..9ca2b24175 100644 --- a/paddle/legacy/gserver/layers/MultiplexLayer.cpp +++ b/paddle/legacy/gserver/layers/MultiplexLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/NormLayer.cpp b/paddle/legacy/gserver/layers/NormLayer.cpp index 4678f6fa9a..443e26dbc8 100644 --- a/paddle/legacy/gserver/layers/NormLayer.cpp +++ b/paddle/legacy/gserver/layers/NormLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "NormLayer.h" #include "NormProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { REGISTER_LAYER_CREATE_FUNC(norm, &NormLayer::create); diff --git a/paddle/legacy/gserver/layers/NormProjectionLayer.cpp b/paddle/legacy/gserver/layers/NormProjectionLayer.cpp index 3013bbdbc7..72affaa1ce 100644 --- a/paddle/legacy/gserver/layers/NormProjectionLayer.cpp +++ b/paddle/legacy/gserver/layers/NormProjectionLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "NormProjectionLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { size_t CMRProjectionNormLayer::getSize() { diff --git a/paddle/legacy/gserver/layers/OuterProdLayer.cpp b/paddle/legacy/gserver/layers/OuterProdLayer.cpp index 7988560d5a..d0928be9d4 100644 --- a/paddle/legacy/gserver/layers/OuterProdLayer.cpp +++ b/paddle/legacy/gserver/layers/OuterProdLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/PadLayer.cpp b/paddle/legacy/gserver/layers/PadLayer.cpp index b1910e108b..7b92b3de2d 100644 --- a/paddle/legacy/gserver/layers/PadLayer.cpp +++ b/paddle/legacy/gserver/layers/PadLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PadLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ParameterReluLayer.cpp b/paddle/legacy/gserver/layers/ParameterReluLayer.cpp index 12d04fc1c3..23715d1975 100644 --- a/paddle/legacy/gserver/layers/ParameterReluLayer.cpp +++ b/paddle/legacy/gserver/layers/ParameterReluLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ParameterReluLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ParameterReluLayer.h b/paddle/legacy/gserver/layers/ParameterReluLayer.h index a4abd7af75..3aac4b42f6 100644 --- a/paddle/legacy/gserver/layers/ParameterReluLayer.h +++ b/paddle/legacy/gserver/layers/ParameterReluLayer.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/Pool3DLayer.cpp b/paddle/legacy/gserver/layers/Pool3DLayer.cpp index 3ac9eb0d81..ae3f55c27f 100644 --- a/paddle/legacy/gserver/layers/Pool3DLayer.cpp +++ b/paddle/legacy/gserver/layers/Pool3DLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Pool3DLayer.h" #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/PoolLayer.cpp b/paddle/legacy/gserver/layers/PoolLayer.cpp index ee589e6be5..df172d9575 100644 --- a/paddle/legacy/gserver/layers/PoolLayer.cpp +++ b/paddle/legacy/gserver/layers/PoolLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "PoolLayer.h" #include "MaxPoolWithMaskLayer.h" #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #ifdef PADDLE_WITH_CUDA #include "CudnnPoolLayer.h" #endif diff --git a/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp b/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp index 73d320e67e..e44b1d7ba1 100644 --- a/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/PowerLayer.cpp b/paddle/legacy/gserver/layers/PowerLayer.cpp index 26a57fcfdd..5e94c64db6 100644 --- a/paddle/legacy/gserver/layers/PowerLayer.cpp +++ b/paddle/legacy/gserver/layers/PowerLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/RecurrentLayer.h b/paddle/legacy/gserver/layers/RecurrentLayer.h index 94e633e657..287ea27a09 100644 --- a/paddle/legacy/gserver/layers/RecurrentLayer.h +++ b/paddle/legacy/gserver/layers/RecurrentLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "Layer.h" #include "SequenceToBatch.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp b/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp index 4f121bdb4a..3932124599 100644 --- a/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp +++ b/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "paddle/legacy/gserver/layers/Layer.h" #include "paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/RowConvLayer.cpp b/paddle/legacy/gserver/layers/RowConvLayer.cpp index 63b499e486..1961557dc2 100644 --- a/paddle/legacy/gserver/layers/RowConvLayer.cpp +++ b/paddle/legacy/gserver/layers/RowConvLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "RowConvLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp index 68a0ff7358..70d44d2a7e 100644 --- a/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp +++ b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ScaleSubRegionLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { REGISTER_LAYER(scale_sub_region, ScaleSubRegionLayer); diff --git a/paddle/legacy/gserver/layers/ScalingLayer.cpp b/paddle/legacy/gserver/layers/ScalingLayer.cpp index e68ff8905e..a8286b6614 100644 --- a/paddle/legacy/gserver/layers/ScalingLayer.cpp +++ b/paddle/legacy/gserver/layers/ScalingLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp index a181f55d91..72fb068148 100644 --- a/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp +++ b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h index 068da57d8d..3ba04d9b2a 100644 --- a/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h +++ b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp b/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp index 024ca048b4..7b598e11ac 100644 --- a/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp index b00bf65997..8735d71ba3 100644 --- a/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "SequencePoolLayer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SequencePoolLayer.cpp b/paddle/legacy/gserver/layers/SequencePoolLayer.cpp index 650ab425d1..243b795db4 100644 --- a/paddle/legacy/gserver/layers/SequencePoolLayer.cpp +++ b/paddle/legacy/gserver/layers/SequencePoolLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SequencePoolLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp b/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp index f72acadec9..e3d40cab50 100644 --- a/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp b/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp index 65b4787fed..3ed51c4ef2 100644 --- a/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp b/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp index beb288e4ad..9168fd7dda 100644 --- a/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp +++ b/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h index 6cdfba33b3..6d8ed9c878 100644 --- a/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h +++ b/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "Layer.h" #include "PoolProjection.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { /** diff --git a/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp b/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp index 4f648ec01c..f363c2ac8d 100644 --- a/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp +++ b/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SubSequenceLayer.cpp b/paddle/legacy/gserver/layers/SubSequenceLayer.cpp index 6b27550048..36796f0473 100644 --- a/paddle/legacy/gserver/layers/SubSequenceLayer.cpp +++ b/paddle/legacy/gserver/layers/SubSequenceLayer.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp b/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp index 4cd173a8c7..410f4dd7c9 100644 --- a/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp +++ b/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp b/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp index 704735de38..513f3df7bc 100644 --- a/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp +++ b/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SwitchOrderLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/TensorLayer.cpp b/paddle/legacy/gserver/layers/TensorLayer.cpp index b2271c63ef..7f874bce0f 100644 --- a/paddle/legacy/gserver/layers/TensorLayer.cpp +++ b/paddle/legacy/gserver/layers/TensorLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "TensorLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/TensorLayer.h b/paddle/legacy/gserver/layers/TensorLayer.h index 1c30f7c889..fc491a7c9f 100644 --- a/paddle/legacy/gserver/layers/TensorLayer.h +++ b/paddle/legacy/gserver/layers/TensorLayer.h @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/TransLayer.cpp b/paddle/legacy/gserver/layers/TransLayer.cpp index cf87ca53d1..fd1d435ea5 100644 --- a/paddle/legacy/gserver/layers/TransLayer.cpp +++ b/paddle/legacy/gserver/layers/TransLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "TransLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { REGISTER_LAYER(trans, TransLayer); diff --git a/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp b/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp index 45f5977989..c8533dc7d7 100644 --- a/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp +++ b/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/UpsampleLayer.h b/paddle/legacy/gserver/layers/UpsampleLayer.h index ea12a711a8..2fe5938244 100644 --- a/paddle/legacy/gserver/layers/UpsampleLayer.h +++ b/paddle/legacy/gserver/layers/UpsampleLayer.h @@ -17,8 +17,8 @@ limitations under the License. */ #include #include "Layer.h" #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/legacy/gserver/layers/ValidationLayer.cpp b/paddle/legacy/gserver/layers/ValidationLayer.cpp index b626825a7b..9956fd2ed4 100644 --- a/paddle/legacy/gserver/layers/ValidationLayer.cpp +++ b/paddle/legacy/gserver/layers/ValidationLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "ValidationLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/gserver/tests/test_BatchNorm.cpp b/paddle/legacy/gserver/tests/test_BatchNorm.cpp index c7a65a3051..e21fa16074 100644 --- a/paddle/legacy/gserver/tests/test_BatchNorm.cpp +++ b/paddle/legacy/gserver/tests/test_BatchNorm.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/DataLayer.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/GlobalConstants.h" #include "LayerGradUtil.h" #include "paddle/legacy/cuda/include/hl_batch_norm.h" diff --git a/paddle/legacy/gserver/tests/test_CompareSparse.cpp b/paddle/legacy/gserver/tests/test_CompareSparse.cpp index 26b23eac7c..11b633a588 100644 --- a/paddle/legacy/gserver/tests/test_CompareSparse.cpp +++ b/paddle/legacy/gserver/tests/test_CompareSparse.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include #include "paddle/legacy/trainer/Trainer.h" diff --git a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp index 6e8f855c6b..e19c34abbd 100644 --- a/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp +++ b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include #include diff --git a/paddle/legacy/gserver/tests/test_ConvTrans.cpp b/paddle/legacy/gserver/tests/test_ConvTrans.cpp index 41a03f3b44..4ea0a3d379 100644 --- a/paddle/legacy/gserver/tests/test_ConvTrans.cpp +++ b/paddle/legacy/gserver/tests/test_ConvTrans.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/DataLayer.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/GlobalConstants.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/legacy/gserver/tests/test_ConvUnify.cpp b/paddle/legacy/gserver/tests/test_ConvUnify.cpp index a01a2b6937..d4ca158352 100644 --- a/paddle/legacy/gserver/tests/test_ConvUnify.cpp +++ b/paddle/legacy/gserver/tests/test_ConvUnify.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/DataLayer.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/GlobalConstants.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp b/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp index 6a1cfdc705..e15b4e5038 100644 --- a/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp +++ b/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/DataLayer.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/GlobalConstants.h" #include "LayerGradUtil.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp b/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp index 1c95492551..7082c1363a 100644 --- a/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp +++ b/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include "paddle/legacy/gserver/layers/LinearChainCRF.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_MKLDNN.cpp b/paddle/legacy/gserver/tests/test_MKLDNN.cpp index a20ccfb772..c79ccd1956 100644 --- a/paddle/legacy/gserver/tests/test_MKLDNN.cpp +++ b/paddle/legacy/gserver/tests/test_MKLDNN.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include #include #include "MKLDNNTester.h" diff --git a/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp b/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp index ca1a588d83..25b1a1191d 100644 --- a/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp +++ b/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp @@ -18,10 +18,10 @@ limitations under the License. 
*/ #include #undef PADDLE_DISABLE_TIMER -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #include "paddle/legacy/gserver/layers/MultinomialSampler.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp index e07922b58c..c9f9f3e61b 100644 --- a/paddle/legacy/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp @@ -14,13 +14,13 @@ limitations under the License. */ #undef PADDLE_DISABLE_TIMER #include -#include +#include #include #include #include "paddle/legacy/trainer/Trainer.h" +#include "paddle/legacy/utils/Stat.h" #include "paddle/testing/TestUtil.h" -#include "paddle/utils/Stat.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/gserver/tests/test_PyDataProvider.cpp b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp index 9cde4ecca5..0209e6818a 100644 --- a/paddle/legacy/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include "paddle/legacy/gserver/dataproviders/PyDataProvider.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "paddle/testing/TestUtil.h" diff --git a/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp b/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp index 7f5a087b9a..de313ba82c 100644 --- a/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "paddle/legacy/gserver/dataproviders/DataProvider.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Util.h" DEFINE_string(train_list, "unittest.list", "file list for unittest"); diff --git a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp index 279f2c2fbb..153c3e7f36 100644 --- a/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp @@ -17,9 +17,9 @@ limitations under the License. */ #include #include #include -#include -#include -#include +#include +#include +#include DECLARE_int32(seed); diff --git a/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp b/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp index 852a08d493..71198cb6a1 100644 --- a/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/DataLayer.h" diff --git a/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp index 160d95f158..1975d9196d 100644 --- a/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include #include -#include +#include #include #include #include diff --git a/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp b/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp index 34b88e6893..b1697e1616 100644 --- a/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include "ModelConfig.pb.h" #include "paddle/legacy/gserver/layers/CTCLayer.h" #include "paddle/legacy/gserver/layers/DataLayer.h" diff --git a/paddle/legacy/math/Allocator.h b/paddle/legacy/math/Allocator.h index c43a83891e..ffb5ec1cad 100644 --- a/paddle/legacy/math/Allocator.h +++ b/paddle/legacy/math/Allocator.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "hl_gpu.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/BaseMatrix.cu b/paddle/legacy/math/BaseMatrix.cu index 7b57419e5a..7e7cdc57a9 100644 --- a/paddle/legacy/math/BaseMatrix.cu +++ b/paddle/legacy/math/BaseMatrix.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include #include "BaseMatrix.h" diff --git a/paddle/legacy/math/BaseMatrix.h b/paddle/legacy/math/BaseMatrix.h index 1958629aa0..4627f847d3 100644 --- a/paddle/legacy/math/BaseMatrix.h +++ b/paddle/legacy/math/BaseMatrix.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "TensorExpression.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/legacy/math/CpuSparseMatrix.cpp b/paddle/legacy/math/CpuSparseMatrix.cpp index 88683ec984..20c65a3a1d 100644 --- a/paddle/legacy/math/CpuSparseMatrix.cpp +++ b/paddle/legacy/math/CpuSparseMatrix.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "float.h" #include "hl_gpu.h" #include "paddle/legacy/math/MathUtils.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/MathFunctions.cpp b/paddle/legacy/math/MathFunctions.cpp index 152aeb5d64..bbf34a32f3 100644 --- a/paddle/legacy/math/MathFunctions.cpp +++ b/paddle/legacy/math/MathFunctions.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/legacy/math/MathFunctions.h" #include "hl_matrix_apply.cuh" #include "hl_matrix_ops.cuh" -#include "paddle/utils/DynamicLoader.h" +#include "paddle/legacy/utils/DynamicLoader.h" namespace dynload { diff --git a/paddle/legacy/math/MathUtils.cpp b/paddle/legacy/math/MathUtils.cpp index b2afdbcd51..47ac9c187c 100644 --- a/paddle/legacy/math/MathUtils.cpp +++ b/paddle/legacy/math/MathUtils.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "MathUtils.h" #include #include "Vector.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/Matrix.cpp b/paddle/legacy/math/Matrix.cpp index 50b0bc5011..e53f95006c 100644 --- a/paddle/legacy/math/Matrix.cpp +++ b/paddle/legacy/math/Matrix.cpp @@ -26,11 +26,11 @@ limitations under the License. 
*/ #include "hl_gpu.h" #include "hl_table_apply.h" #include "hl_top_k.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "NEONFunctions.h" #include "paddle/legacy/function/GemmFunctor.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" #include "SIMDFunctions.h" diff --git a/paddle/legacy/math/Matrix.h b/paddle/legacy/math/Matrix.h index 74dc690792..ff4f4cfc2a 100644 --- a/paddle/legacy/math/Matrix.h +++ b/paddle/legacy/math/Matrix.h @@ -18,16 +18,16 @@ limitations under the License. */ #include #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/ThreadLocal.h" #include #include "BaseMatrix.h" #include "MemoryHandle.h" #include "Vector.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/math/MatrixBitCode.cpp b/paddle/legacy/math/MatrixBitCode.cpp index f7a949294b..f35f266a30 100644 --- a/paddle/legacy/math/MatrixBitCode.cpp +++ b/paddle/legacy/math/MatrixBitCode.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "Matrix.h" #include "hl_gpu.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/RowBuffer.h b/paddle/legacy/math/RowBuffer.h index 6950afaa21..9dfd5eff06 100644 --- a/paddle/legacy/math/RowBuffer.h +++ b/paddle/legacy/math/RowBuffer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include "MemoryHandle.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/SparseMatrix.cpp b/paddle/legacy/math/SparseMatrix.cpp index 1faa343dbc..6f68252b0a 100644 --- a/paddle/legacy/math/SparseMatrix.cpp +++ b/paddle/legacy/math/SparseMatrix.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include "hl_gpu.h" #include "hl_top_k.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/SparseRowMatrix.cpp b/paddle/legacy/math/SparseRowMatrix.cpp index 4254175aab..39bcdf2298 100644 --- a/paddle/legacy/math/SparseRowMatrix.cpp +++ b/paddle/legacy/math/SparseRowMatrix.cpp @@ -17,12 +17,12 @@ limitations under the License. */ #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "SIMDFunctions.h" -#include "paddle/utils/Thread.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Thread.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/SparseRowMatrix.h b/paddle/legacy/math/SparseRowMatrix.h index cf6779e8b0..e206747a41 100644 --- a/paddle/legacy/math/SparseRowMatrix.h +++ b/paddle/legacy/math/SparseRowMatrix.h @@ -21,7 +21,7 @@ limitations under the License. */ #include #include "Matrix.h" #include "RowBuffer.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/math/Storage.cpp b/paddle/legacy/math/Storage.cpp index 5982bf2e56..65d53aeaa9 100644 --- a/paddle/legacy/math/Storage.cpp +++ b/paddle/legacy/math/Storage.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "Storage.h" #include "Allocator.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" #ifndef PADDLE_MOBILE_INFERENCE DEFINE_int32(pool_limit_size, diff --git a/paddle/legacy/math/Storage.h b/paddle/legacy/math/Storage.h index 61a9aa2a07..bd22dde2c8 100644 --- a/paddle/legacy/math/Storage.h +++ b/paddle/legacy/math/Storage.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "PoolAllocator.h" -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" namespace paddle { diff --git a/paddle/legacy/math/TensorAssign.h b/paddle/legacy/math/TensorAssign.h index 7d4726ddba..efbfce6c4f 100644 --- a/paddle/legacy/math/TensorAssign.h +++ b/paddle/legacy/math/TensorAssign.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/TensorEvaluate.h b/paddle/legacy/math/TensorEvaluate.h index 2a722016e7..3029dd35fb 100644 --- a/paddle/legacy/math/TensorEvaluate.h +++ b/paddle/legacy/math/TensorEvaluate.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "hl_base.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/TensorExpression.h b/paddle/legacy/math/TensorExpression.h index f6da9adfca..1c6cf07831 100644 --- a/paddle/legacy/math/TensorExpression.h +++ b/paddle/legacy/math/TensorExpression.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include "hl_tensor_ops.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/TrainingAlgorithmOp.cu b/paddle/legacy/math/TrainingAlgorithmOp.cu index b844768d3b..9e1eaa0f45 100644 --- a/paddle/legacy/math/TrainingAlgorithmOp.cu +++ b/paddle/legacy/math/TrainingAlgorithmOp.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "BaseMatrix.h" #include "TrainingAlgorithmOp.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #if __cplusplus > 199711L diff --git a/paddle/legacy/math/TrainingAlgorithmOp.h b/paddle/legacy/math/TrainingAlgorithmOp.h index fe40fc2d36..921c2742cf 100644 --- a/paddle/legacy/math/TrainingAlgorithmOp.h +++ b/paddle/legacy/math/TrainingAlgorithmOp.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "BaseMatrix.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/math/Vector.cpp b/paddle/legacy/math/Vector.cpp index 2a47ed7ef8..87f48bb162 100644 --- a/paddle/legacy/math/Vector.cpp +++ b/paddle/legacy/math/Vector.cpp @@ -13,17 +13,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Vector.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include "Matrix.h" #include "hl_gpu.h" #include "hl_matrix.h" #include "hl_table_apply.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Thread.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Thread.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { diff --git a/paddle/legacy/math/Vector.h b/paddle/legacy/math/Vector.h index 964b42cae5..63cb4651c5 100644 --- a/paddle/legacy/math/Vector.h +++ b/paddle/legacy/math/Vector.h @@ -21,8 +21,8 @@ limitations under the License. */ #include "BaseMatrix.h" #include "MemoryHandle.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Thread.h" namespace paddle { diff --git a/paddle/legacy/math/tests/OriginalOptimizerApi.h b/paddle/legacy/math/tests/OriginalOptimizerApi.h index 1f942e28f4..f386e19958 100644 --- a/paddle/legacy/math/tests/OriginalOptimizerApi.h +++ b/paddle/legacy/math/tests/OriginalOptimizerApi.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/utils/GlobalConstants.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/math/tests/PerfUtils.h b/paddle/legacy/math/tests/PerfUtils.h index bee2351e2f..eaf4869e4c 100644 --- a/paddle/legacy/math/tests/PerfUtils.h +++ b/paddle/legacy/math/tests/PerfUtils.h @@ -21,7 +21,7 @@ limitations under the License. */ #else -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" using namespace paddle; // NOLINT #define EXPRESSION_PERFORMANCE(expression) \ diff --git a/paddle/legacy/math/tests/test_Allocator.cpp b/paddle/legacy/math/tests/test_Allocator.cpp index 710b55f57e..122be9082a 100644 --- a/paddle/legacy/math/tests/test_Allocator.cpp +++ b/paddle/legacy/math/tests/test_Allocator.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" #define private public #include "paddle/legacy/math/Allocator.h" #include "paddle/legacy/math/MemoryHandle.h" diff --git a/paddle/legacy/math/tests/test_CpuGpuVector.cpp b/paddle/legacy/math/tests/test_CpuGpuVector.cpp index 3807158200..010fef534d 100644 --- a/paddle/legacy/math/tests/test_CpuGpuVector.cpp +++ b/paddle/legacy/math/tests/test_CpuGpuVector.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "test_matrixUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/math/tests/test_ExecViaCpu.cpp b/paddle/legacy/math/tests/test_ExecViaCpu.cpp index 55a3f5f505..b2ce0bc7ed 100644 --- a/paddle/legacy/math/tests/test_ExecViaCpu.cpp +++ b/paddle/legacy/math/tests/test_ExecViaCpu.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include -#include +#include +#include #include #include "paddle/legacy/math/SparseMatrix.h" diff --git a/paddle/legacy/math/tests/test_FPException.cpp b/paddle/legacy/math/tests/test_FPException.cpp index 6fd17f2969..aa6aea71c8 100644 --- a/paddle/legacy/math/tests/test_FPException.cpp +++ b/paddle/legacy/math/tests/test_FPException.cpp @@ -31,7 +31,7 @@ limitations under the License. */ #include #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/math/tests/test_GpuProfiler.cpp b/paddle/legacy/math/tests/test_GpuProfiler.cpp index 450c9a035e..ee27109f21 100644 --- a/paddle/legacy/math/tests/test_GpuProfiler.cpp +++ b/paddle/legacy/math/tests/test_GpuProfiler.cpp @@ -17,9 +17,9 @@ limitations under the License. */ #include #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "paddle/testing/TestUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/math/tests/test_SIMDFunctions.cpp b/paddle/legacy/math/tests/test_SIMDFunctions.cpp index eef281b3f7..c6490f70e3 100644 --- a/paddle/legacy/math/tests/test_SIMDFunctions.cpp +++ b/paddle/legacy/math/tests/test_SIMDFunctions.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/legacy/math/SIMDFunctions.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/math/tests/test_SparseMatrix.cpp b/paddle/legacy/math/tests/test_SparseMatrix.cpp index dbcbeb8d50..30896a945e 100644 --- a/paddle/legacy/math/tests/test_SparseMatrix.cpp +++ b/paddle/legacy/math/tests/test_SparseMatrix.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include "test_matrixUtil.h" diff --git a/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp b/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp index 3ae9cf111a..214ae8971a 100644 --- a/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp +++ b/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "PerfUtils.h" #include "TensorCheck.h" #include "paddle/legacy/math/TrainingAlgorithmOp.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/math/tests/test_matrixCompare.cpp b/paddle/legacy/math/tests/test_matrixCompare.cpp index 98521aeb04..a43adde46f 100644 --- a/paddle/legacy/math/tests/test_matrixCompare.cpp +++ b/paddle/legacy/math/tests/test_matrixCompare.cpp @@ -21,10 +21,10 @@ limitations under the License. 
*/ #include "paddle/legacy/math/MathUtils.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "paddle/testing/TestUtil.h" -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/math/tests/test_matrixUtil.h b/paddle/legacy/math/tests/test_matrixUtil.h index bb80172b1e..58c93f746e 100644 --- a/paddle/legacy/math/tests/test_matrixUtil.h +++ b/paddle/legacy/math/tests/test_matrixUtil.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include +#include #include "paddle/legacy/math/SparseMatrix.h" namespace paddle { diff --git a/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp b/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp index 959c9d40b0..492aa0a689 100644 --- a/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp +++ b/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include #include "paddle/legacy/math/Matrix.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "test_matrixUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/legacy/optimizer/serialization.h b/paddle/legacy/optimizer/serialization.h index bf12eed15f..2067a8d8cf 100644 --- a/paddle/legacy/optimizer/serialization.h +++ b/paddle/legacy/optimizer/serialization.h @@ -19,7 +19,7 @@ #include #include #include "OptimizerConfig.pb.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "tensor.h" namespace paddle { diff --git a/paddle/legacy/optimizer/tensor.h b/paddle/legacy/optimizer/tensor.h index d2cef99074..2e58577d4d 100644 --- a/paddle/legacy/optimizer/tensor.h +++ b/paddle/legacy/optimizer/tensor.h @@ -18,8 +18,8 @@ #include #include -#include "paddle/utils/Common.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { namespace optimizer { diff --git a/paddle/legacy/parameter/Argument.h b/paddle/legacy/parameter/Argument.h index f936d944cb..ea8634896c 100644 --- a/paddle/legacy/parameter/Argument.h +++ b/paddle/legacy/parameter/Argument.h @@ -16,8 +16,8 @@ limitations under the License. */ #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/parameter/Parameter.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/parameter/FirstOrderOptimizer.cpp b/paddle/legacy/parameter/FirstOrderOptimizer.cpp index 89bb840f82..4f82a115f7 100644 --- a/paddle/legacy/parameter/FirstOrderOptimizer.cpp +++ b/paddle/legacy/parameter/FirstOrderOptimizer.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "FirstOrderOptimizer.h" #include "paddle/legacy/math/TrainingAlgorithmOp.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/parameter/LearningRateScheduler.cpp b/paddle/legacy/parameter/LearningRateScheduler.cpp index d57d2189a4..68c44a7ec4 100644 --- a/paddle/legacy/parameter/LearningRateScheduler.cpp +++ b/paddle/legacy/parameter/LearningRateScheduler.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "LearningRateScheduler.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" namespace paddle { diff --git a/paddle/legacy/parameter/LearningRateScheduler.h b/paddle/legacy/parameter/LearningRateScheduler.h index 3fad970402..fc7e380a6a 100644 --- a/paddle/legacy/parameter/LearningRateScheduler.h +++ b/paddle/legacy/parameter/LearningRateScheduler.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "TrainerConfig.pb.h" -#include "paddle/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/ClassRegistrar.h" namespace paddle { // NOLINTNEXTLINES_4 diff --git a/paddle/legacy/parameter/Parameter.cpp b/paddle/legacy/parameter/Parameter.cpp index d00019027b..666d808f0c 100644 --- a/paddle/legacy/parameter/Parameter.cpp +++ b/paddle/legacy/parameter/Parameter.cpp @@ -25,7 +25,7 @@ limitations under the License. */ #include "paddle/legacy/math/CpuSparseMatrix.h" #include "paddle/legacy/math/MathUtils.h" #include "paddle/legacy/math/SparseRowMatrix.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_int32(enable_grad_share, (100 * 1024 * 1024), diff --git a/paddle/legacy/parameter/Parameter.h b/paddle/legacy/parameter/Parameter.h index 75cfb3f4aa..43b567dad0 100644 --- a/paddle/legacy/parameter/Parameter.h +++ b/paddle/legacy/parameter/Parameter.h @@ -26,11 +26,11 @@ limitations under the License. */ #include "ParameterUpdaterHook.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/parameter/ParameterOptimizer.cpp b/paddle/legacy/parameter/ParameterOptimizer.cpp index 638daa58f1..b9dffa5afb 100644 --- a/paddle/legacy/parameter/ParameterOptimizer.cpp +++ b/paddle/legacy/parameter/ParameterOptimizer.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include diff --git a/paddle/legacy/parameter/ParameterUpdateFunctions.cpp b/paddle/legacy/parameter/ParameterUpdateFunctions.cpp index db1153c2d6..72c9841acf 100644 --- a/paddle/legacy/parameter/ParameterUpdateFunctions.cpp +++ b/paddle/legacy/parameter/ParameterUpdateFunctions.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #ifdef __AVX__ #include #include diff --git a/paddle/legacy/parameter/ParameterUpdateFunctions.h b/paddle/legacy/parameter/ParameterUpdateFunctions.h index 3dbde93b91..a7cc1c4c47 100644 --- a/paddle/legacy/parameter/ParameterUpdateFunctions.h +++ b/paddle/legacy/parameter/ParameterUpdateFunctions.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/legacy/parameter/ParameterUpdaterBase.cpp b/paddle/legacy/parameter/ParameterUpdaterBase.cpp index 7815856b45..7d9d3fad63 100644 --- a/paddle/legacy/parameter/ParameterUpdaterBase.cpp +++ b/paddle/legacy/parameter/ParameterUpdaterBase.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "ParameterUpdaterBase.h" #include #include "hl_gpu.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/parameter/ParameterUpdaterHook.cpp b/paddle/legacy/parameter/ParameterUpdaterHook.cpp index e4677f894a..bfb9769fb6 100644 --- a/paddle/legacy/parameter/ParameterUpdaterHook.cpp +++ b/paddle/legacy/parameter/ParameterUpdaterHook.cpp @@ -24,8 +24,8 @@ limitations under the License. */ #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/parameter/Parameter.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/parameter/Regularizer.cpp b/paddle/legacy/parameter/Regularizer.cpp index d223fd2df6..c1d5f4fa68 100644 --- a/paddle/legacy/parameter/Regularizer.cpp +++ b/paddle/legacy/parameter/Regularizer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Regularizer.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/parameter/Weight.cpp b/paddle/legacy/parameter/Weight.cpp index ba4ddce69f..9d94050a5c 100644 --- a/paddle/legacy/parameter/Weight.cpp +++ b/paddle/legacy/parameter/Weight.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Weight.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/parameter/tests/test_common.cpp b/paddle/legacy/parameter/tests/test_common.cpp index 3c4ee11934..8de9d6da98 100644 --- a/paddle/legacy/parameter/tests/test_common.cpp +++ b/paddle/legacy/parameter/tests/test_common.cpp @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include #include -#include -#include -#include +#include +#include +#include using namespace paddle; // NOLINT diff --git a/paddle/legacy/pserver/BaseClient.cpp b/paddle/legacy/pserver/BaseClient.cpp index a6204ef47e..13bb8a1cc5 100644 --- a/paddle/legacy/pserver/BaseClient.cpp +++ b/paddle/legacy/pserver/BaseClient.cpp @@ -16,7 +16,7 @@ limitations under the License. 
*/ #include #include #include -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_string(pservers); diff --git a/paddle/legacy/pserver/BaseClient.h b/paddle/legacy/pserver/BaseClient.h index 92bb0a8b6a..66e8f39cd6 100644 --- a/paddle/legacy/pserver/BaseClient.h +++ b/paddle/legacy/pserver/BaseClient.h @@ -17,8 +17,8 @@ limitations under the License. */ #include "ParameterService.pb.h" #include "paddle/legacy/math/Matrix.h" #include "paddle/legacy/pserver/ProtoServer.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Queue.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Queue.h" namespace paddle { diff --git a/paddle/legacy/pserver/LightNetwork.cpp b/paddle/legacy/pserver/LightNetwork.cpp index 4c0da2217e..469c95853e 100644 --- a/paddle/legacy/pserver/LightNetwork.cpp +++ b/paddle/legacy/pserver/LightNetwork.cpp @@ -27,8 +27,8 @@ limitations under the License. */ #include "LightNetwork.h" #include "RDMANetwork.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" /// quick ack can reduce the latency of small message DEFINE_bool(small_messages, diff --git a/paddle/legacy/pserver/LightNetwork.h b/paddle/legacy/pserver/LightNetwork.h index bcfc9655e9..380f86832f 100644 --- a/paddle/legacy/pserver/LightNetwork.h +++ b/paddle/legacy/pserver/LightNetwork.h @@ -21,7 +21,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" struct sxi_socket; diff --git a/paddle/legacy/pserver/ParameterClient2.cpp b/paddle/legacy/pserver/ParameterClient2.cpp index 98b3966250..4c544ddc28 100644 --- a/paddle/legacy/pserver/ParameterClient2.cpp +++ b/paddle/legacy/pserver/ParameterClient2.cpp @@ -16,9 +16,9 @@ limitations under the License. */ #include "ParameterClient2.h" #include "paddle/legacy/math/SparseRowMatrix.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers"); DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send"); diff --git a/paddle/legacy/pserver/ParameterClient2.h b/paddle/legacy/pserver/ParameterClient2.h index 2bc0e47866..9320e19c4d 100644 --- a/paddle/legacy/pserver/ParameterClient2.h +++ b/paddle/legacy/pserver/ParameterClient2.h @@ -23,11 +23,11 @@ limitations under the License. */ #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/parameter/Parameter.h" #include "paddle/legacy/pserver/BaseClient.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" #include "ParameterService.pb.h" diff --git a/paddle/legacy/pserver/ParameterServer2.cpp b/paddle/legacy/pserver/ParameterServer2.cpp index 293fc7ca69..8533a322d9 100644 --- a/paddle/legacy/pserver/ParameterServer2.cpp +++ b/paddle/legacy/pserver/ParameterServer2.cpp @@ -26,10 +26,10 @@ limitations under the License. 
*/ #include "paddle/legacy/parameter/ParameterUpdateFunctions.h" #include "paddle/legacy/parameter/Regularizer.h" #include "paddle/legacy/parameter/ThreadLocalBuffer.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec"); DEFINE_double(async_lagged_ratio_min, diff --git a/paddle/legacy/pserver/ParameterServer2.h b/paddle/legacy/pserver/ParameterServer2.h index 040699878d..069e730ea4 100644 --- a/paddle/legacy/pserver/ParameterServer2.h +++ b/paddle/legacy/pserver/ParameterServer2.h @@ -29,10 +29,10 @@ limitations under the License. */ #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/parameter/Parameter.h" #include "paddle/legacy/parameter/ParameterOptimizer.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/ThreadLocal.h" #include "ParameterService.pb.h" diff --git a/paddle/legacy/pserver/ParameterServerController.h b/paddle/legacy/pserver/ParameterServerController.h index 1308d62fb1..b90d0cbcea 100644 --- a/paddle/legacy/pserver/ParameterServerController.h +++ b/paddle/legacy/pserver/ParameterServerController.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "ParameterServer2.h" #include "ParameterServerConfig.pb.h" #include "RDMANetwork.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" namespace paddle { diff --git a/paddle/legacy/pserver/RDMANetwork.h b/paddle/legacy/pserver/RDMANetwork.h index 83db6b9df7..c87056f72c 100644 --- a/paddle/legacy/pserver/RDMANetwork.h +++ b/paddle/legacy/pserver/RDMANetwork.h @@ -19,7 +19,7 @@ limitations under the License. */ #else #define PROMPT_ERR() LOG(FATAL) << "Paddle is not compiled with rdma" #endif -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include struct sxi_sock; diff --git a/paddle/legacy/pserver/SocketChannel.cpp b/paddle/legacy/pserver/SocketChannel.cpp index 72e6943408..79c763c62b 100644 --- a/paddle/legacy/pserver/SocketChannel.cpp +++ b/paddle/legacy/pserver/SocketChannel.cpp @@ -22,7 +22,7 @@ limitations under the License. */ #include #include "RDMANetwork.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/pserver/SocketChannel.h b/paddle/legacy/pserver/SocketChannel.h index 8b45ac5609..a7b3cd42f0 100644 --- a/paddle/legacy/pserver/SocketChannel.h +++ b/paddle/legacy/pserver/SocketChannel.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/pserver/SparseParameterDistribution.cpp b/paddle/legacy/pserver/SparseParameterDistribution.cpp index bb247f389c..3f17b228f0 100644 --- a/paddle/legacy/pserver/SparseParameterDistribution.cpp +++ b/paddle/legacy/pserver/SparseParameterDistribution.cpp @@ -14,9 +14,9 @@ limitations under the License. 
*/ #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/utils/Flags.h" #include "SparseParameterDistribution.h" diff --git a/paddle/legacy/pserver/SparseParameterDistribution.h b/paddle/legacy/pserver/SparseParameterDistribution.h index e168f36c75..ee78029958 100644 --- a/paddle/legacy/pserver/SparseParameterDistribution.h +++ b/paddle/legacy/pserver/SparseParameterDistribution.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/legacy/pserver/test/SocketTest.cpp b/paddle/legacy/pserver/test/SocketTest.cpp index bb9ee355dd..3a781fcbf6 100644 --- a/paddle/legacy/pserver/test/SocketTest.cpp +++ b/paddle/legacy/pserver/test/SocketTest.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include @@ -23,7 +23,7 @@ limitations under the License. */ #include #include "paddle/legacy/math/Vector.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" struct MessageHeader { int64_t dataLength; diff --git a/paddle/legacy/pserver/test/test_ParameterServer2.cpp b/paddle/legacy/pserver/test/test_ParameterServer2.cpp index 60419f3a4a..542e80e046 100644 --- a/paddle/legacy/pserver/test/test_ParameterServer2.cpp +++ b/paddle/legacy/pserver/test/test_ParameterServer2.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include #include #include -#include -#include +#include +#include using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/legacy/pserver/test/test_ProtoServer.cpp b/paddle/legacy/pserver/test/test_ProtoServer.cpp index 8d5e26f995..f7ab2e8af4 100644 --- a/paddle/legacy/pserver/test/test_ProtoServer.cpp +++ b/paddle/legacy/pserver/test/test_ProtoServer.cpp @@ -17,8 +17,8 @@ limitations under the License. */ #include "ParameterService.pb.h" #include "paddle/legacy/math/Vector.h" #include "paddle/legacy/pserver/ProtoServer.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" DEFINE_string(server_addr, "127.0.0.1", "Server address"); DEFINE_int64(dim, 50000000, "Data size"); diff --git a/paddle/legacy/trainer/MergeModel.cpp b/paddle/legacy/trainer/MergeModel.cpp index 6624d6d27b..8a3601f192 100644 --- a/paddle/legacy/trainer/MergeModel.cpp +++ b/paddle/legacy/trainer/MergeModel.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "ParamUtil.h" #include "Trainer.h" #include "paddle/legacy/pserver/ParameterServer2.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/utils/PythonUtil.h" DEFINE_string(model_dir, "", "Directory for separated model files"); DEFINE_string(config_file, "", "Config file for the model"); diff --git a/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp b/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp index 410ac6d95c..cdd832acd1 100644 --- a/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp +++ b/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "NewRemoteParameterUpdater.h" #include "Trainer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_int32(trainer_id); DECLARE_string(save_dir); diff --git a/paddle/legacy/trainer/NewRemoteParameterUpdater.h b/paddle/legacy/trainer/NewRemoteParameterUpdater.h index 33c1fa7bdf..707e9ceb9b 100644 --- a/paddle/legacy/trainer/NewRemoteParameterUpdater.h +++ b/paddle/legacy/trainer/NewRemoteParameterUpdater.h @@ -20,8 +20,8 @@ limitations under the License. */ #include "ParameterUpdater.h" #include "libpaddle_pserver_cclient.h" #include "paddle/legacy/pserver/ParameterClient2.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/trainer/ParamUtil.cpp b/paddle/legacy/trainer/ParamUtil.cpp index b577e3e868..b5aba32dee 100644 --- a/paddle/legacy/trainer/ParamUtil.cpp +++ b/paddle/legacy/trainer/ParamUtil.cpp @@ -23,12 +23,12 @@ limitations under the License. */ #include #include -#include +#include -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "TesterConfig.h" #include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" diff --git a/paddle/legacy/trainer/ParamUtil.h b/paddle/legacy/trainer/ParamUtil.h index c34e079b90..0778696776 100644 --- a/paddle/legacy/trainer/ParamUtil.h +++ b/paddle/legacy/trainer/ParamUtil.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/trainer/ParameterUpdater.cpp b/paddle/legacy/trainer/ParameterUpdater.cpp index 4e9e890c85..549fb0332d 100644 --- a/paddle/legacy/trainer/ParameterUpdater.cpp +++ b/paddle/legacy/trainer/ParameterUpdater.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "ParameterUpdater.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" namespace paddle { diff --git a/paddle/legacy/trainer/ParameterUpdater.h b/paddle/legacy/trainer/ParameterUpdater.h index 0070254d1c..acddc3702d 100644 --- a/paddle/legacy/trainer/ParameterUpdater.h +++ b/paddle/legacy/trainer/ParameterUpdater.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Thread.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Thread.h" +#include "paddle/legacy/utils/Util.h" #include "paddle/legacy/parameter/AverageOptimizer.h" #include "paddle/legacy/parameter/FirstOrderOptimizer.h" diff --git a/paddle/legacy/trainer/RemoteParameterUpdater.cpp b/paddle/legacy/trainer/RemoteParameterUpdater.cpp index 7314266cb2..5de1cc7827 100644 --- a/paddle/legacy/trainer/RemoteParameterUpdater.cpp +++ b/paddle/legacy/trainer/RemoteParameterUpdater.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "RemoteParameterUpdater.h" #include "Trainer.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_int32(trainer_id); DECLARE_string(save_dir); diff --git a/paddle/legacy/trainer/RemoteParameterUpdater.h b/paddle/legacy/trainer/RemoteParameterUpdater.h index 7a9b687ac2..6846853298 100644 --- a/paddle/legacy/trainer/RemoteParameterUpdater.h +++ b/paddle/legacy/trainer/RemoteParameterUpdater.h @@ -18,8 +18,8 @@ limitations under the License. */ #include #include "ParameterUpdater.h" #include "paddle/legacy/pserver/ParameterClient2.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/legacy/trainer/Tester.cpp b/paddle/legacy/trainer/Tester.cpp index f7daf1327b..d977ca9657 100644 --- a/paddle/legacy/trainer/Tester.cpp +++ b/paddle/legacy/trainer/Tester.cpp @@ -24,10 +24,10 @@ limitations under the License. */ #include -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "TesterConfig.h" #include "paddle/legacy/gserver/gradientmachines/GradientMachineMode.h" diff --git a/paddle/legacy/trainer/Tester.h b/paddle/legacy/trainer/Tester.h index bce9775a09..a298602d1d 100644 --- a/paddle/legacy/trainer/Tester.h +++ b/paddle/legacy/trainer/Tester.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/trainer/TesterConfig.h b/paddle/legacy/trainer/TesterConfig.h index ef10c7dbf7..6c78f7cda3 100644 --- a/paddle/legacy/trainer/TesterConfig.h +++ b/paddle/legacy/trainer/TesterConfig.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/trainer/ThreadParameterUpdater.cpp b/paddle/legacy/trainer/ThreadParameterUpdater.cpp index 39e63c333e..0601bdf24e 100644 --- a/paddle/legacy/trainer/ThreadParameterUpdater.cpp +++ b/paddle/legacy/trainer/ThreadParameterUpdater.cpp @@ -14,11 +14,11 @@ limitations under the License. */ #include "ThreadParameterUpdater.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "paddle/legacy/math/SparseRowMatrix.h" #include "paddle/legacy/parameter/ThreadLocalBuffer.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" DECLARE_int32(trainer_count); diff --git a/paddle/legacy/trainer/ThreadParameterUpdater.h b/paddle/legacy/trainer/ThreadParameterUpdater.h index bd0ce99078..172287d4eb 100644 --- a/paddle/legacy/trainer/ThreadParameterUpdater.h +++ b/paddle/legacy/trainer/ThreadParameterUpdater.h @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/legacy/parameter/OptimizerWithRegularizer.h" #include "paddle/legacy/parameter/Parameter.h" #include "paddle/legacy/parameter/Regularizer.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include diff --git a/paddle/legacy/trainer/Trainer.cpp b/paddle/legacy/trainer/Trainer.cpp index edfd72197e..2db754793c 100644 --- a/paddle/legacy/trainer/Trainer.cpp +++ b/paddle/legacy/trainer/Trainer.cpp @@ -23,11 +23,11 @@ limitations under the License. */ #include -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "RemoteParameterUpdater.h" #include "TesterConfig.h" diff --git a/paddle/legacy/trainer/Trainer.h b/paddle/legacy/trainer/Trainer.h index 58acec1781..b467f9af0c 100644 --- a/paddle/legacy/trainer/Trainer.h +++ b/paddle/legacy/trainer/Trainer.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/trainer/TrainerBenchmark.cpp b/paddle/legacy/trainer/TrainerBenchmark.cpp index 173653c816..7f5bd23354 100644 --- a/paddle/legacy/trainer/TrainerBenchmark.cpp +++ b/paddle/legacy/trainer/TrainerBenchmark.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #undef PADDLE_DISABLE_TIMER #include "Trainer.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" DECLARE_int32(test_period); diff --git a/paddle/legacy/trainer/TrainerConfigHelper.cpp b/paddle/legacy/trainer/TrainerConfigHelper.cpp index 2b68d89e48..4d31ba8d71 100644 --- a/paddle/legacy/trainer/TrainerConfigHelper.cpp +++ b/paddle/legacy/trainer/TrainerConfigHelper.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "TrainerConfigHelper.h" #include "ParamUtil.h" #include "TrainerConfig.pb.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/PythonUtil.h" DECLARE_string(config); DECLARE_string(init_model_path); diff --git a/paddle/legacy/trainer/TrainerConfigHelper.h b/paddle/legacy/trainer/TrainerConfigHelper.h index b21dda964e..0e428bea2c 100644 --- a/paddle/legacy/trainer/TrainerConfigHelper.h +++ b/paddle/legacy/trainer/TrainerConfigHelper.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include -#include +#include +#include #include namespace paddle { diff --git a/paddle/legacy/trainer/TrainerInternal.cpp b/paddle/legacy/trainer/TrainerInternal.cpp index b4b1a87cd5..ee3dea6340 100644 --- a/paddle/legacy/trainer/TrainerInternal.cpp +++ b/paddle/legacy/trainer/TrainerInternal.cpp @@ -26,10 +26,10 @@ limitations under the License. 
*/ #include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" #include "paddle/legacy/gserver/layers/ValidationLayer.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "RemoteParameterUpdater.h" #include "ThreadParameterUpdater.h" diff --git a/paddle/legacy/trainer/TrainerInternal.h b/paddle/legacy/trainer/TrainerInternal.h index ecc87966dc..93919a68fc 100644 --- a/paddle/legacy/trainer/TrainerInternal.h +++ b/paddle/legacy/trainer/TrainerInternal.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include diff --git a/paddle/legacy/trainer/TrainerInternalConfig.h b/paddle/legacy/trainer/TrainerInternalConfig.h index 29d588e1be..b91b539323 100644 --- a/paddle/legacy/trainer/TrainerInternalConfig.h +++ b/paddle/legacy/trainer/TrainerInternalConfig.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include diff --git a/paddle/legacy/trainer/TrainerMain.cpp b/paddle/legacy/trainer/TrainerMain.cpp index 115e5d88a2..911aeba192 100644 --- a/paddle/legacy/trainer/TrainerMain.cpp +++ b/paddle/legacy/trainer/TrainerMain.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "paddle/legacy/pserver/ParameterServerController.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/utils/PythonUtil.h" #include "ParamUtil.h" #include "Trainer.h" diff --git a/paddle/legacy/trainer/tests/test_Compare.cpp b/paddle/legacy/trainer/tests/test_Compare.cpp index 9623c280eb..9bbb0a6013 100644 --- a/paddle/legacy/trainer/tests/test_Compare.cpp +++ b/paddle/legacy/trainer/tests/test_Compare.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include "paddle/legacy/trainer/Trainer.h" diff --git a/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp index 94eaba2e2f..847adcfaba 100644 --- a/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp +++ b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include #include #include -#include +#include #include #include #include diff --git a/paddle/legacy/trainer/tests/test_Trainer.cpp b/paddle/legacy/trainer/tests/test_Trainer.cpp index 9fb80762fe..14ad0a2652 100644 --- a/paddle/legacy/trainer/tests/test_Trainer.cpp +++ b/paddle/legacy/trainer/tests/test_Trainer.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include -#include +#include +#include #include "paddle/legacy/trainer/Trainer.h" #include diff --git a/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp index 0e25e35443..3e5c5ea723 100644 --- a/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include +#include +#include #include "paddle/legacy/trainer/Trainer.h" #include "paddle/legacy/trainer/TrainerInternal.h" diff --git a/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp index bd6ee0f01d..47b4e82cd3 100644 --- a/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp +++ b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include -#include +#include #include diff --git a/paddle/utils/.gitignore b/paddle/legacy/utils/.gitignore similarity index 100% rename from paddle/utils/.gitignore rename to paddle/legacy/utils/.gitignore diff --git a/paddle/utils/Any.h b/paddle/legacy/utils/Any.h similarity index 100% rename from paddle/utils/Any.h rename to paddle/legacy/utils/Any.h diff --git a/paddle/utils/CMakeLists.txt b/paddle/legacy/utils/CMakeLists.txt similarity index 100% rename from paddle/utils/CMakeLists.txt rename to paddle/legacy/utils/CMakeLists.txt diff --git a/paddle/utils/ClassRegistrar.h b/paddle/legacy/utils/ClassRegistrar.h similarity index 100% rename from paddle/utils/ClassRegistrar.h rename to paddle/legacy/utils/ClassRegistrar.h diff --git a/paddle/utils/Common.h b/paddle/legacy/utils/Common.h similarity index 100% rename from paddle/utils/Common.h rename to paddle/legacy/utils/Common.h diff --git a/paddle/utils/CpuId.cpp b/paddle/legacy/utils/CpuId.cpp similarity index 96% rename from paddle/utils/CpuId.cpp rename to paddle/legacy/utils/CpuId.cpp index 7186feef04..66e7c6606f 100644 --- a/paddle/utils/CpuId.cpp +++ b/paddle/legacy/utils/CpuId.cpp @@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/CpuId.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CpuId.h" +#include "paddle/legacy/utils/Util.h" #ifdef _WIN32 diff --git a/paddle/utils/CpuId.h b/paddle/legacy/utils/CpuId.h similarity index 100% rename from paddle/utils/CpuId.h rename to paddle/legacy/utils/CpuId.h diff --git a/paddle/utils/CustomStackTrace.cpp b/paddle/legacy/utils/CustomStackTrace.cpp similarity index 100% rename from paddle/utils/CustomStackTrace.cpp rename to paddle/legacy/utils/CustomStackTrace.cpp diff --git a/paddle/utils/CustomStackTrace.h b/paddle/legacy/utils/CustomStackTrace.h similarity index 100% rename from paddle/utils/CustomStackTrace.h rename to paddle/legacy/utils/CustomStackTrace.h diff --git a/paddle/utils/DynamicLoader.cpp b/paddle/legacy/utils/DynamicLoader.cpp similarity index 100% rename from paddle/utils/DynamicLoader.cpp rename to paddle/legacy/utils/DynamicLoader.cpp diff --git a/paddle/utils/DynamicLoader.h b/paddle/legacy/utils/DynamicLoader.h similarity index 100% rename from paddle/utils/DynamicLoader.h rename to paddle/legacy/utils/DynamicLoader.h diff --git a/paddle/utils/Error.h b/paddle/legacy/utils/Error.h similarity index 100% rename from paddle/utils/Error.h rename to paddle/legacy/utils/Error.h diff --git a/paddle/utils/Excepts.h b/paddle/legacy/utils/Excepts.h similarity index 100% rename from paddle/utils/Excepts.h rename to paddle/legacy/utils/Excepts.h diff --git a/paddle/utils/Flags.cpp b/paddle/legacy/utils/Flags.cpp similarity index 100% rename from paddle/utils/Flags.cpp rename to paddle/legacy/utils/Flags.cpp diff --git a/paddle/utils/Flags.h b/paddle/legacy/utils/Flags.h similarity index 100% rename from paddle/utils/Flags.h rename to paddle/legacy/utils/Flags.h diff --git a/paddle/utils/GlobalConstants.cpp b/paddle/legacy/utils/GlobalConstants.cpp similarity index 100% rename from paddle/utils/GlobalConstants.cpp rename to paddle/legacy/utils/GlobalConstants.cpp diff --git a/paddle/utils/GlobalConstants.h b/paddle/legacy/utils/GlobalConstants.h similarity index 100% rename from paddle/utils/GlobalConstants.h rename to paddle/legacy/utils/GlobalConstants.h diff --git a/paddle/utils/Locks.h b/paddle/legacy/utils/Locks.h similarity index 100% rename from paddle/utils/Locks.h rename to paddle/legacy/utils/Locks.h diff --git a/paddle/utils/Logging.cpp b/paddle/legacy/utils/Logging.cpp similarity index 100% rename from paddle/utils/Logging.cpp rename to paddle/legacy/utils/Logging.cpp diff --git a/paddle/utils/Logging.h b/paddle/legacy/utils/Logging.h similarity index 100% rename from paddle/utils/Logging.h rename to paddle/legacy/utils/Logging.h diff --git a/paddle/utils/PythonUtil.cpp b/paddle/legacy/utils/PythonUtil.cpp similarity index 100% rename from paddle/utils/PythonUtil.cpp rename to paddle/legacy/utils/PythonUtil.cpp diff --git a/paddle/utils/PythonUtil.h b/paddle/legacy/utils/PythonUtil.h similarity index 99% rename from paddle/utils/PythonUtil.h rename to paddle/legacy/utils/PythonUtil.h index 6f8d7e0930..b0c8612c37 100644 --- a/paddle/utils/PythonUtil.h +++ b/paddle/legacy/utils/PythonUtil.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #pragma once // clang-format off -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #ifndef PADDLE_NO_PYTHON // must include the following two blocks, otherwise, diff --git a/paddle/utils/Queue.h b/paddle/legacy/utils/Queue.h similarity index 100% rename from paddle/utils/Queue.h rename to paddle/legacy/utils/Queue.h diff --git a/paddle/utils/Stat.cpp b/paddle/legacy/utils/Stat.cpp similarity index 100% rename from paddle/utils/Stat.cpp rename to paddle/legacy/utils/Stat.cpp diff --git a/paddle/utils/Stat.h b/paddle/legacy/utils/Stat.h similarity index 100% rename from paddle/utils/Stat.h rename to paddle/legacy/utils/Stat.h diff --git a/paddle/utils/StringUtil.cpp b/paddle/legacy/utils/StringUtil.cpp similarity index 100% rename from paddle/utils/StringUtil.cpp rename to paddle/legacy/utils/StringUtil.cpp diff --git a/paddle/utils/StringUtil.h b/paddle/legacy/utils/StringUtil.h similarity index 100% rename from paddle/utils/StringUtil.h rename to paddle/legacy/utils/StringUtil.h diff --git a/paddle/utils/Thread.h b/paddle/legacy/utils/Thread.h similarity index 100% rename from paddle/utils/Thread.h rename to paddle/legacy/utils/Thread.h diff --git a/paddle/utils/ThreadLocal.cpp b/paddle/legacy/utils/ThreadLocal.cpp similarity index 100% rename from paddle/utils/ThreadLocal.cpp rename to paddle/legacy/utils/ThreadLocal.cpp diff --git a/paddle/utils/ThreadLocal.h b/paddle/legacy/utils/ThreadLocal.h similarity index 100% rename from paddle/utils/ThreadLocal.h rename to paddle/legacy/utils/ThreadLocal.h diff --git a/paddle/utils/Util.cpp b/paddle/legacy/utils/Util.cpp similarity index 100% rename from paddle/utils/Util.cpp rename to paddle/legacy/utils/Util.cpp diff --git a/paddle/utils/Util.h b/paddle/legacy/utils/Util.h similarity index 100% rename from paddle/utils/Util.h rename to paddle/legacy/utils/Util.h diff --git a/paddle/utils/Version.cpp b/paddle/legacy/utils/Version.cpp similarity index 100% rename from paddle/utils/Version.cpp rename to paddle/legacy/utils/Version.cpp diff --git a/paddle/utils/Version.h b/paddle/legacy/utils/Version.h similarity index 100% rename from paddle/utils/Version.h rename to paddle/legacy/utils/Version.h diff --git a/paddle/utils/arch/linux/Locks.cpp b/paddle/legacy/utils/arch/linux/Locks.cpp similarity index 97% rename from paddle/utils/arch/linux/Locks.cpp rename to paddle/legacy/utils/arch/linux/Locks.cpp index 409af8bce3..32d351e332 100644 --- a/paddle/utils/arch/linux/Locks.cpp +++ b/paddle/legacy/utils/arch/linux/Locks.cpp @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { class SemaphorePrivate { diff --git a/paddle/utils/arch/osx/Excepts.cpp b/paddle/legacy/utils/arch/osx/Excepts.cpp similarity index 97% rename from paddle/utils/arch/osx/Excepts.cpp rename to paddle/legacy/utils/arch/osx/Excepts.cpp index ac44461578..2b7d6dca84 100644 --- a/paddle/utils/arch/osx/Excepts.cpp +++ b/paddle/legacy/utils/arch/osx/Excepts.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Excepts.h" +#include "paddle/legacy/utils/Excepts.h" #if defined(__APPLE__) || defined(__OSX__) #if defined(__arm__) || defined(__arm64__) diff --git a/paddle/utils/arch/osx/Locks.cpp b/paddle/legacy/utils/arch/osx/Locks.cpp similarity index 97% rename from paddle/utils/arch/osx/Locks.cpp rename to paddle/legacy/utils/arch/osx/Locks.cpp index f3905091bd..b68c48f0c3 100644 --- a/paddle/utils/arch/osx/Locks.cpp +++ b/paddle/legacy/utils/arch/osx/Locks.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" #include #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/utils/enable_virtualenv.py b/paddle/legacy/utils/enable_virtualenv.py similarity index 100% rename from paddle/utils/enable_virtualenv.py rename to paddle/legacy/utils/enable_virtualenv.py diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/legacy/utils/tests/CMakeLists.txt similarity index 84% rename from paddle/utils/tests/CMakeLists.txt rename to paddle/legacy/utils/tests/CMakeLists.txt index c770ce1698..4af01db5c8 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/legacy/utils/tests/CMakeLists.txt @@ -13,6 +13,6 @@ add_executable( link_paddle_exe(test_CustomStackTracePrint) if(NOT APPLE) add_test(NAME test_CustomStackTracePrint - COMMAND ${PADDLE_SOURCE_DIR}/paddle/utils/tests/test_CustomStackTracePrint.sh + COMMAND ${PADDLE_SOURCE_DIR}/paddle/legacy/utils/tests/test_CustomStackTracePrint.sh WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/legacy/utils/tests/test_CustomStackTrace.cpp similarity index 94% rename from paddle/utils/tests/test_CustomStackTrace.cpp rename to paddle/legacy/utils/tests/test_CustomStackTrace.cpp index 4d5540b24c..2a418e3ae2 100644 --- a/paddle/utils/tests/test_CustomStackTrace.cpp +++ b/paddle/legacy/utils/tests/test_CustomStackTrace.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include // NOLINT #include // NOLINT -#include "paddle/utils/CustomStackTrace.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CustomStackTrace.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 10, "testing thread number"); diff --git a/paddle/utils/tests/test_CustomStackTracePrint.cpp b/paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp similarity index 86% rename from paddle/utils/tests/test_CustomStackTracePrint.cpp rename to paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp index 360c61c88a..78886a3ed9 100644 --- a/paddle/utils/tests/test_CustomStackTracePrint.cpp +++ b/paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/CustomStackTrace.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CustomStackTrace.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" int main(int argc, char** argv) { paddle::initMain(argc, argv); diff --git a/paddle/utils/tests/test_CustomStackTracePrint.sh b/paddle/legacy/utils/tests/test_CustomStackTracePrint.sh similarity index 100% rename from paddle/utils/tests/test_CustomStackTracePrint.sh rename to paddle/legacy/utils/tests/test_CustomStackTracePrint.sh diff --git a/paddle/utils/tests/test_Error.cpp b/paddle/legacy/utils/tests/test_Error.cpp similarity index 96% rename from paddle/utils/tests/test_Error.cpp rename to paddle/legacy/utils/tests/test_Error.cpp index 6f311fa6b8..250c4d58a6 100644 --- a/paddle/utils/tests/test_Error.cpp +++ b/paddle/legacy/utils/tests/test_Error.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/Error.h" #include diff --git a/paddle/utils/tests/test_SIMDFlags.cpp b/paddle/legacy/utils/tests/test_SIMDFlags.cpp similarity index 94% rename from paddle/utils/tests/test_SIMDFlags.cpp rename to paddle/legacy/utils/tests/test_SIMDFlags.cpp index a808d456a6..6362210acd 100644 --- a/paddle/utils/tests/test_SIMDFlags.cpp +++ b/paddle/legacy/utils/tests/test_SIMDFlags.cpp @@ -11,9 +11,9 @@ limitations under the License. */ #include -#include "paddle/utils/CpuId.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CpuId.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" using namespace paddle; // NOLINT diff --git a/paddle/utils/tests/test_SpinLock.cpp b/paddle/legacy/utils/tests/test_SpinLock.cpp similarity index 93% rename from paddle/utils/tests/test_SpinLock.cpp rename to paddle/legacy/utils/tests/test_SpinLock.cpp index cc34eb1f86..4cd7836d6a 100644 --- a/paddle/utils/tests/test_SpinLock.cpp +++ b/paddle/legacy/utils/tests/test_SpinLock.cpp @@ -17,9 +17,9 @@ limitations under the License. */ #include #include -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/paddle/utils/tests/test_StringUtils.cpp b/paddle/legacy/utils/tests/test_StringUtils.cpp similarity index 95% rename from paddle/utils/tests/test_StringUtils.cpp rename to paddle/legacy/utils/tests/test_StringUtils.cpp index 248f58a7f2..61d2815f09 100644 --- a/paddle/utils/tests/test_StringUtils.cpp +++ b/paddle/legacy/utils/tests/test_StringUtils.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" #include diff --git a/paddle/utils/tests/test_Thread.cpp b/paddle/legacy/utils/tests/test_Thread.cpp similarity index 98% rename from paddle/utils/tests/test_Thread.cpp rename to paddle/legacy/utils/tests/test_Thread.cpp index 6e2580c491..5e07da3236 100644 --- a/paddle/utils/tests/test_Thread.cpp +++ b/paddle/legacy/utils/tests/test_Thread.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include using paddle::AsyncThreadPool; // NOLINT diff --git a/paddle/utils/tests/test_ThreadBarrier.cpp b/paddle/legacy/utils/tests/test_ThreadBarrier.cpp similarity index 94% rename from paddle/utils/tests/test_ThreadBarrier.cpp rename to paddle/legacy/utils/tests/test_ThreadBarrier.cpp index 554b1c1d4a..9c8851ae21 100644 --- a/paddle/utils/tests/test_ThreadBarrier.cpp +++ b/paddle/legacy/utils/tests/test_ThreadBarrier.cpp @@ -18,9 +18,9 @@ limitations under the License. */ #include #include -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/paddle/testing/TestMain.cpp b/paddle/testing/TestMain.cpp index 3e14532d18..1811dbbd1a 100644 --- a/paddle/testing/TestMain.cpp +++ b/paddle/testing/TestMain.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/proto/README.md b/proto/README.md new file mode 100644 index 0000000000..dda7ed7b3c --- /dev/null +++ b/proto/README.md @@ -0,0 +1,3 @@ +## protos in this folder are legacy v2 protos. + +## Please refer to paddle/fluid for latest version. diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index ea25f3ab35..797c0fbcc4 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,4 +1,4 @@ -file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py) +file(GLOB UTILS_PY_FILES . 
./paddle/legacy/utils/*.py) file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/*.py) set(PY_FILES paddle/__init__.py ${UTILS_PY_FILES} diff --git a/tools/codestyle/cpplint_pre_commit.hook b/tools/codestyle/cpplint_pre_commit.hook index f4190fb876..2c65222c8a 100755 --- a/tools/codestyle/cpplint_pre_commit.hook +++ b/tools/codestyle/cpplint_pre_commit.hook @@ -4,7 +4,7 @@ TOTAL_ERRORS=0 # The trick to remove deleted files: https://stackoverflow.com/a/2413151 for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do - if [[ $file =~ ^(paddle/legacy/api/.*|paddle/legacy/capi/.*|paddle/contrib/.*|paddle/legacy/cuda/.*|paddle/legacy/function/.*|paddle/legacy/gserver/.*|paddle/legacy/math/.*|paddle/legacy/optimizer/.*|paddle/legacy/parameter/.*|paddle/legacy/pserver/.*|paddle/legacy/trainer/.*|paddle/utils/.*|paddle/testing/TestUtil.*) ]]; then + if [[ $file =~ ^(paddle/legacy/api/.*|paddle/legacy/capi/.*|paddle/contrib/.*|paddle/legacy/cuda/.*|paddle/legacy/function/.*|paddle/legacy/gserver/.*|paddle/legacy/math/.*|paddle/legacy/optimizer/.*|paddle/legacy/parameter/.*|paddle/legacy/pserver/.*|paddle/legacy/trainer/.*|paddle/legacy/utils/.*|paddle/testing/TestUtil.*) ]]; then continue; else cpplint --filter=-readability/fn_size $file; From d70a38d8ec08e6dd1093672ddcd47d36a4578a5e Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 3 Jul 2018 15:09:29 +0800 Subject: [PATCH 30/34] fix --- go/CMakeLists.txt | 2 +- go/cmd/master/master.go | 2 +- go/pserver/etcd_client.go | 2 +- .../inference/paddle_inference_api_tensorrt_subgraph_engine.cc | 2 +- paddle/fluid/inference/tensorrt/convert/io_converter.h | 2 +- paddle/fluid/inference/tensorrt/convert/op_converter.h | 2 +- paddle/fluid/inference/tensorrt/convert/ut_helper.h | 2 +- paddle/fluid/inference/tensorrt/engine.h | 2 +- paddle/fluid/operators/tensorrt_engine_op.cc | 2 +- paddle/legacy/trainer/tests/test_Compare.cpp | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go/CMakeLists.txt b/go/CMakeLists.txt index 839b75a259..f3a9296c2c 100644 --- a/go/CMakeLists.txt +++ b/go/CMakeLists.txt @@ -20,4 +20,4 @@ add_subdirectory(master/c) add_subdirectory(master) add_subdirectory(pserver) add_subdirectory(pserver/client) -add_subdirectory(legacy/utils/networkhelper) +add_subdirectory(utils/networkhelper) diff --git a/go/cmd/master/master.go b/go/cmd/master/master.go index 6c1e4c7198..537df59c86 100644 --- a/go/cmd/master/master.go +++ b/go/cmd/master/master.go @@ -28,8 +28,8 @@ import ( log "github.com/inconshreveable/log15" "github.com/namsral/flag" - "github.com/PaddlePaddle/Paddle/go/legacy/utils/networkhelper" "github.com/PaddlePaddle/Paddle/go/master" + "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" ) func main() { diff --git a/go/pserver/etcd_client.go b/go/pserver/etcd_client.go index 80b1abee5e..719013b1bb 100644 --- a/go/pserver/etcd_client.go +++ b/go/pserver/etcd_client.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/PaddlePaddle/Paddle/go/legacy/utils/networkhelper" + "github.com/PaddlePaddle/Paddle/go/utils/networkhelper" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/concurrency" log "github.com/inconshreveable/log15" diff --git a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc b/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc index 14554545d9..a11396cee9 100644 --- a/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc +++ 
b/paddle/contrib/inference/paddle_inference_api_tensorrt_subgraph_engine.cc @@ -15,7 +15,7 @@ #include "paddle/contrib/inference/paddle_inference_api.h" #include "paddle/contrib/inference/paddle_inference_api_impl.h" #include "paddle/fluid/inference/analysis/analyzer.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" +#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.h b/paddle/fluid/inference/tensorrt/convert/io_converter.h index fc8881f80c..71c48e085d 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" +#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index bf4e07fed0..6697952051 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h index 0003b16d4e..3b1f531adc 100644 --- a/paddle/fluid/inference/tensorrt/convert/ut_helper.h +++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h @@ -25,9 +25,9 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/inference/analysis/helper.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index 42a596deb1..b06a9bbc67 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -20,8 +20,8 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/inference/engine.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/helper.h" +#include "paddle/fluid/inference/utils/singleton.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc index b5d0578831..647cfc0a0a 100644 --- a/paddle/fluid/operators/tensorrt_engine_op.cc +++ b/paddle/fluid/operators/tensorrt_engine_op.cc @@ -18,9 +18,9 @@ #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/inference/legacy/utils/singleton.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/inference/utils/singleton.h" #include "paddle/fluid/operators/tensorrt_engine_op.h" namespace paddle { diff --git a/paddle/legacy/trainer/tests/test_Compare.cpp b/paddle/legacy/trainer/tests/test_Compare.cpp index 9bbb0a6013..e37e546be8 100644 --- a/paddle/legacy/trainer/tests/test_Compare.cpp +++ b/paddle/legacy/trainer/tests/test_Compare.cpp @@ -23,7 +23,7 @@ using namespace paddle; // NOLINT using namespace std; // NOLINT static const string& configFile = - "./legacy/trainer/tests/sample_trainer_config.conf"; + "legacy/trainer/tests/sample_trainer_config.conf"; DECLARE_int32(gpu_id); DECLARE_bool(use_gpu); From 2d0e5592b5438c9b6ba88723f1d5fd7e4436d4b5 Mon Sep 17 00:00:00 2001 From: yuyang18 Date: Tue, 3 Jul 2018 16:07:12 +0800 Subject: [PATCH 31/34] Use std::map for Place <--> DeviceContext --- paddle/fluid/framework/details/op_handle_base.cc | 10 ++-------- paddle/fluid/framework/details/op_handle_base.h | 6 ++---- .../fluid/framework/details/reduce_and_gather.h | 3 +-- paddle/fluid/platform/device_context.cc | 3 ++- paddle/fluid/platform/device_context.h | 8 +++----- paddle/fluid/platform/place.h | 15 +++------------ 6 files changed, 13 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 3560fabb42..d80bdcf15d 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -124,16 +124,10 @@ void OpHandleBase::RunAndRecordEvent(const std::function &callback) { #ifdef PADDLE_WITH_CUDA if (!events_.empty()) { // Use event std::function method = callback; - // NOTE(zcd): device context must be ordered here because RecordEvent - // will use a mutex to ensure the safe of multi-threads. - std::map ordered_ctxes; for (auto &p : dev_ctxes_) { - ordered_ctxes.emplace(p.second, p.first); - } - for (auto &p : ordered_ctxes) { method = [method, p, this]() { - static_cast(p.first)->RecordEvent( - events_.at(boost::get(p.second).device), + static_cast(p.second)->RecordEvent( + events_.at(boost::get(p.first).device), method); }; } diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index fbd90a3296..6aec178831 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -13,9 +13,9 @@ // limitations under the License. 
#pragma once +#include #include #include - #include "paddle/fluid/framework/details/var_handle.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/macros.h" @@ -92,9 +92,7 @@ class OpHandleBase { std::vector inputs_; std::vector outputs_; - std::unordered_map - dev_ctxes_; + std::map dev_ctxes_; #ifdef PADDLE_WITH_CUDA std::unordered_map events_; diff --git a/paddle/fluid/framework/details/reduce_and_gather.h b/paddle/fluid/framework/details/reduce_and_gather.h index a6ffb37313..c0cd873a1d 100644 --- a/paddle/fluid/framework/details/reduce_and_gather.h +++ b/paddle/fluid/framework/details/reduce_and_gather.h @@ -54,8 +54,7 @@ struct ReduceLoDTensor { inline void GatherSelectedRows( const std::vector &src_selecte_rows_, const std::vector &in_places, - const std::unordered_map &dev_ctxes, + const std::map &dev_ctxes, const platform::Place &out_place, SelectedRows *dst_selecte_rows) { PADDLE_ENFORCE(!src_selecte_rows_.empty()); diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 6c50ab2685..2cc26da013 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/platform/device_context.h" +#include #include #include #include @@ -35,7 +36,7 @@ DeviceContextPool::DeviceContextPool( const std::vector& places) { PADDLE_ENFORCE_GT(places.size(), 0); using PtrType = std::unique_ptr; - std::unordered_set set; + std::set set; for (auto& p : places) { set.insert(p); } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 292ffef1ae..88e0383146 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -27,12 +27,12 @@ limitations under the License. 
*/ #include #endif +#include +#include "glog/logging.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" #include "unsupported/Eigen/CXX11/Tensor" -#include "glog/logging.h" - namespace paddle { namespace platform { @@ -201,9 +201,7 @@ class DeviceContextPool { private: static DeviceContextPool* pool; - std::unordered_map, PlaceHash> - device_contexts_; + std::map> device_contexts_; DISABLE_COPY_AND_ASSIGN(DeviceContextPool); }; diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index ad54a87899..e3ee504f3d 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -30,6 +30,7 @@ struct CPUPlace { // needed for variant equality comparison inline bool operator==(const CPUPlace &) const { return true; } inline bool operator!=(const CPUPlace &) const { return false; } + inline bool operator<(const CPUPlace &) const { return false; } }; struct CUDAPlace { @@ -42,6 +43,7 @@ struct CUDAPlace { return device == o.device; } inline bool operator!=(const CUDAPlace &o) const { return !(*this == o); } + inline bool operator<(const CUDAPlace &o) const { return device < o.device; } int device; }; @@ -52,6 +54,7 @@ struct CUDAPinnedPlace { // needed for variant equality comparison inline bool operator==(const CUDAPinnedPlace &) const { return true; } inline bool operator!=(const CUDAPinnedPlace &) const { return false; } + inline bool operator<(const CUDAPinnedPlace &) const { return false; } }; struct IsCUDAPlace : public boost::static_visitor { @@ -89,18 +92,6 @@ bool is_cuda_pinned_place(const Place &); bool places_are_same_class(const Place &, const Place &); bool is_same_place(const Place &, const Place &); -struct PlaceHash { - std::size_t operator()(const Place &p) const { - constexpr size_t num_dev_bits = 4; - std::hash ihash; - size_t dev_id = 0; - if (is_gpu_place(p)) { - dev_id = boost::get(p).device; - } - return ihash(dev_id << num_dev_bits | p.which()); - } -}; - std::ostream &operator<<(std::ostream &, const Place &); template From 0cefe857c48ca469f19164936531d52ba1278ecc Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 3 Jul 2018 17:03:29 +0800 Subject: [PATCH 32/34] add two versions of paddle.tgz for develop branch --- doc/v2/howto/capi/compile_paddle_lib_cn.md | 9 +++++++-- doc/v2/howto/capi/compile_paddle_lib_en.md | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/doc/v2/howto/capi/compile_paddle_lib_cn.md b/doc/v2/howto/capi/compile_paddle_lib_cn.md index e223fd33a8..2c87e9afc6 100644 --- a/doc/v2/howto/capi/compile_paddle_lib_cn.md +++ b/doc/v2/howto/capi/compile_paddle_lib_cn.md @@ -18,7 +18,7 @@ cpu_avx_openblas -暂无 +paddle.tgz cpu_noavx_openblas @@ -35,7 +35,12 @@ cuda8.0_cudnn7_avx_mkl paddle.tgz - + + +cuda9.0_cudnn7_avx_mkl +paddle.tgz + + ### 从源码编译 diff --git a/doc/v2/howto/capi/compile_paddle_lib_en.md b/doc/v2/howto/capi/compile_paddle_lib_en.md index 6212a30811..3fa8a18a9f 100644 --- a/doc/v2/howto/capi/compile_paddle_lib_en.md +++ b/doc/v2/howto/capi/compile_paddle_lib_en.md @@ -17,7 +17,7 @@ cpu_avx_openblas -- +paddle.tgz cpu_noavx_openblas @@ -34,7 +34,12 @@ cuda8.0_cudnn7_avx_mkl paddle.tgz - + + +cuda9.0_cudnn7_avx_mkl +paddle.tgz + + ### From source From 93da8e27110c8ec045a65e76d18f4eac9fa6bad8 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 3 Jul 2018 20:12:04 +0800 Subject: [PATCH 33/34] update develop version --- python/setup.py.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py.in b/python/setup.py.in index 
d92abf6088..51380149d0 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -5,7 +5,7 @@ class BinaryDistribution(Distribution): return True MAJOR = 0 -MINOR = 11 +MINOR = 14 PATCH = 0 RC = 0 ISTAGED = False From 26ff5a53eeaf44856052b69b9b5d5bf6999376f1 Mon Sep 17 00:00:00 2001 From: Wu Yi Date: Tue, 3 Jul 2018 23:03:34 +0800 Subject: [PATCH 34/34] Add note when import core.so error (#11918) * add note when import core.so error * update --- python/paddle/fluid/framework.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 9dcd907451..93cd6b621a 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -19,7 +19,16 @@ import re import numpy as np import proto.framework_pb2 as framework_pb2 -from . import core +try: + from . import core +except ImportError, e: + raise ImportError( + """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\" + if you encounters \"libmkldnn.so not found\" errors. If you have python + installed in other directory, replace \"/usr/local/lib\" with your own + directory. The original error is: """ % str(e)) +except Exception, e: + raise e import unique_name __all__ = [