From 7efdf05ac295144e1c298da08b5970aba81007e8 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Jul 2018 15:11:05 +0800 Subject: [PATCH 01/20] make look_up_op supporting tensor ids --- paddle/fluid/operators/lookup_table_op.cc | 14 +++++++++----- paddle/fluid/operators/lookup_table_op.cu | 8 ++++---- paddle/fluid/operators/lookup_table_op.h | 9 ++++----- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 3e8f3ec5c5..d77b095c5d 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -32,11 +32,16 @@ class LookupTableOp : public framework::OperatorWithKernel { auto table_dims = ctx->GetInputDim("W"); auto ids_dims = ctx->GetInputDim("Ids"); + int ids_rank = ids_dims.size(); - PADDLE_ENFORCE_EQ(ids_dims.size(), 2); - PADDLE_ENFORCE_EQ(ids_dims[1], 1); + PADDLE_ENFORCE_EQ(table_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1], 1, + "The last dimension of the 'Ids' tensor must be 1."); - ctx->SetOutputDim("Out", {ids_dims[0], table_dims[1]}); + auto output_dims = + framework::vectorize(framework::slice_ddim(ids_dims, 0, ids_rank - 1)); + output_dims.push_back(table_dims[1]); + ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); if (ctx->GetOutputsVarType("Out")[0] == framework::proto::VarType::LOD_TENSOR) { @@ -61,8 +66,7 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Ids", "An input with type int32 or int64 " "contains the ids to be looked up in W. " - "Ids must be a column vector with rank = 2. " - "The 2nd dimension size must be 1."); + "The last dimension size must be 1."); AddOutput("Out", "The lookup results, which have the same type as W."); AddAttr("is_sparse", "(boolean, default false) " diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 27483372b9..e8462129fd 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -118,23 +118,23 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { auto *d_table = context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); - auto ids_dim = ids->dims(); + int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector new_rows; - new_rows.resize(ids_dim[0]); + new_rows.resize(ids_num); auto gpu_place = boost::get(context.GetPlace()); // TODO(yuyang18): Strange code here. 
memory::Copy(platform::CPUPlace(), new_rows.CUDAMutableData(context.GetPlace()), gpu_place, - ids_data, ids_dim[0] * sizeof(int64_t), stream); + ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); - d_table_value->Resize({ids_dim[0], table->dims()[1]}); + d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data(context.GetPlace()); auto *d_table_data = d_table_value->data(); diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index c9f074ca0e..ccf77af6a6 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -109,17 +109,17 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_table = context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); - auto ids_dim = ids->dims(); + int64_t ids_num = ids->numel(); framework::Vector new_rows; - new_rows.reserve(ids_dim[0]); - for (int64_t i = 0; i < ids_dim[0]; i++) { + new_rows.reserve(ids_num); + for (int64_t i = 0; i < ids_num; i++) { new_rows.push_back(ids_data[i]); } d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); - d_table_value->Resize({ids_dim[0], table_dim[1]}); + d_table_value->Resize({ids_num, table_dim[1]}); d_table_value->mutable_data(context.GetPlace()); d_table->set_height(table_dim[0]); @@ -135,7 +135,6 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_table = context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); - auto ids_dim = ids->dims(); int N = table_dim[0]; int D = d_output->dims()[1]; From caf10b474f0452c095ca7d78f7c643fbd1a5d109 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 31 Jul 2018 16:09:30 +0800 Subject: [PATCH 02/20] make profiler use thread_id from g_thread_id Add a few more RecordEvent. 
Cleanup --- .../framework/details/all_reduce_op_handle.cc | 2 + .../framework/details/reduce_op_handle.cc | 2 + .../scope_buffered_ssa_graph_executor.cc | 2 + .../details/threaded_ssa_graph_executor.cc | 4 + .../operators/distributed/send_recv.proto | 84 +++++++++++++++++++ paddle/fluid/operators/parallel_do_op.cc | 5 -- paddle/fluid/platform/device_tracer.cc | 10 --- paddle/fluid/platform/device_tracer.h | 4 - paddle/fluid/platform/profiler.cc | 14 +--- paddle/fluid/platform/profiler.h | 5 -- 10 files changed, 96 insertions(+), 36 deletions(-) create mode 100644 paddle/fluid/operators/distributed/send_recv.proto diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index 700c73c745..bf493a3fa4 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -17,6 +17,7 @@ #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/reduce_and_gather.h" #include "paddle/fluid/framework/details/variable_visitor.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { @@ -45,6 +46,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, #endif void AllReduceOpHandle::RunImpl() { + platform::RecordEvent r("all_reduce", nullptr); if (NoDummyInputSize() == 1) { return; // No need to all reduce when GPU count = 1; } else { diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 7160e346da..68bdfbaf52 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -16,12 +16,14 @@ #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/reduce_and_gather.h" #include "paddle/fluid/framework/details/variable_visitor.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { namespace details { void ReduceOpHandle::RunImpl() { + platform::RecordEvent r("reduce", nullptr); if (places_.size() == 1) return; // the input and output may have dummy var. 
auto in_var_handles = DynamicCast(inputs_); diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc index 1d80bab90f..5bd974d6b7 100644 --- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -17,6 +17,7 @@ #include #include #include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { @@ -62,6 +63,7 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run( eptr = std::current_exception(); } + platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr); drop_scope_counter_ += 1; if (!fetch_tensors.empty() || drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) { diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index e556c84b02..0eaf9a9c95 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/ssa_graph_builder.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { @@ -34,6 +35,8 @@ ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( FeedFetchList ThreadedSSAGraphExecutor::Run( const std::vector &fetch_tensors) { + std::unique_ptr event( + new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr)); std::unordered_map pending_ops; std::unordered_set pending_vars; BlockingQueue ready_vars; @@ -84,6 +87,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Clean run context run_op_futures_.clear(); exception_holder_.Clear(); + event.reset(nullptr); // Step 3. Execution while (!pending_vars.empty()) { diff --git a/paddle/fluid/operators/distributed/send_recv.proto b/paddle/fluid/operators/distributed/send_recv.proto new file mode 100644 index 0000000000..2b3536b33a --- /dev/null +++ b/paddle/fluid/operators/distributed/send_recv.proto @@ -0,0 +1,84 @@ + +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under +the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +syntax = "proto3"; +package sendrecv; + +option cc_generic_services = false; + +service SendRecvService { + // For parameter server round-robin like hashing, do not split tensors. + // Send and recv only one tensor + // TODO(typhoonzero): add streaming API + rpc SendVariable(VariableMessage) returns (VoidMessage) {} + // Argument VariableMessage for GetVariable should only contain varname. + rpc GetVariable(VariableMessage) returns (VariableMessage) {} + // pre-fetch variable by given variable name and Ids + rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {} + + rpc CheckpointNotify(VariableMessage) returns (VoidMessage) {} +} + +// VariableMessage is serialized paddle variable message. 
+// It can be: +// LoDTensor +// SelectedRows +enum VarType { + LOD_TENSOR = 0; + SELECTED_ROWS = 1; + NCCL_ID = 2; +} + +// NOTICE(gongwb):don't modify this proto if you are not +// not familar with how we serialize in sendrecvop_utils.h +// and deserilize it in variable_response.h. +message VariableMessage { + enum Type { + // Pod Types + BOOL = 0; + INT16 = 1; + INT32 = 2; + INT64 = 3; + FP16 = 4; + FP32 = 5; + FP64 = 6; + } + + message LodData { repeated int64 lod_data = 1; } + string varname = 1; + // TODO(Yancey1989): reference framework::proto::VarDesc::VarType + VarType type = 2; + // bool persistable is not needed for sending. + // tensor info: + Type data_type = 3; + repeated int64 dims = 4; + + // lod details: + int64 lod_level = 5; + repeated LodData lod = 6; + // selected_rows height, aka. original dim0 + int64 slr_height = 7; + // tensor data + bytes serialized = 8; + // selected_rows data + bytes rows = 9; + // Look up table block execution output variable name. + string out_varname = 10; + // If 1, the ps server will start profiling, the ps + // server stops profiling and generates a profile to /tmp/profile_ps_* + // when profile switches from 1 to 2. + int64 profile = 11; +} + +message VoidMessage {} diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index c9744db3d0..916cdad3fd 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -18,7 +18,6 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -166,8 +165,6 @@ class ParallelDoOp : public framework::OperatorBase { workers.emplace_back( framework::Async([program, cur_scope, place, block, place_idx] { - // Give the thread an id to distinguish parallel block with same id. - platform::RecordThread rt(static_cast(place_idx) + 1); framework::Executor executor(place); executor.Run(*program, cur_scope, block->ID(), false /*create_local_scope*/); @@ -244,8 +241,6 @@ class ParallelDoGradOp : public framework::OperatorBase { // execute workers.emplace_back( framework::Async([program, cur_scope, place, block, i] { - // Give the thread an id to distinguish parallel block with same id. - platform::RecordThread rt(static_cast(i) + 1); framework::Executor executor(place); executor.Run(*program, cur_scope, block->ID(), false /*create_local_scope*/); diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index d9e2afadaf..8fa8dbd67c 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -30,9 +30,6 @@ limitations under the License. */ namespace paddle { namespace platform { namespace { -// Current thread's id. Note, we don't distinguish nested threads -// for now. -thread_local int cur_thread_id = 0; // Tracking the nested block stacks of each thread. thread_local std::deque block_id_stack; // Tracking the nested event stacks. 
@@ -413,12 +410,5 @@ void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); } void ClearCurBlock() { block_id_stack.pop_back(); } int BlockDepth() { return block_id_stack.size(); } - -void SetCurThread(int thread_id) { cur_thread_id = thread_id; } - -void ClearCurThread() { cur_thread_id = 0; } - -int CurThread() { return cur_thread_id; } - } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h index 0375c7439c..d2a571f434 100644 --- a/paddle/fluid/platform/device_tracer.h +++ b/paddle/fluid/platform/device_tracer.h @@ -99,9 +99,5 @@ std::string CurAnnotation(); void SetCurBlock(int block_id); void ClearCurBlock(); int BlockDepth(); - -void SetCurThread(int thread_id); -void ClearCurThread(); -int CurThread(); } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 01de9d7041..f4a0e2a861 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -190,7 +190,7 @@ RecordEvent::~RecordEvent() { DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec(), - BlockDepth(), CurThread()); + BlockDepth(), g_thread_id); } ClearCurAnnotation(); PopEvent(name_, dev_ctx_); @@ -211,21 +211,11 @@ RecordBlock::~RecordBlock() { // We try to put all blocks at the same nested depth in the // same timeline lane. and distinguish the using thread_id. tracer->AddCPURecords(name_, start_ns_, PosixInNsec(), BlockDepth(), - CurThread()); + g_thread_id); } ClearCurBlock(); } -RecordThread::RecordThread(int thread_id) { - if (g_state == ProfilerState::kDisabled) return; - SetCurThread(thread_id); -} - -RecordThread::~RecordThread() { - if (g_state == ProfilerState::kDisabled) return; - ClearCurThread(); -} - void EnableProfiler(ProfilerState state) { PADDLE_ENFORCE(state != ProfilerState::kDisabled, "Can't enbale profling, since the input state is ", diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index bf43925373..c99d9c807d 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -95,11 +95,6 @@ struct RecordBlock { uint64_t start_ns_; }; -struct RecordThread { - explicit RecordThread(int thread_id); - ~RecordThread(); -}; - // Return the event list of all threads. Assumed the returned value calls // event_lists, event_lists[i][j] represents the j-th Event of i-th thread. std::vector> GetAllEvents(); From 9d0514bd466f54c1a995ff5ca71e9bce7ff7109e Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 31 Jul 2018 16:18:23 +0800 Subject: [PATCH 03/20] add doc --- doc/fluid/howto/optimization/timeline_cn.md | 20 ++++++++++++------- doc/fluid/howto/optimization/timeline_en.md | 22 +++++++++++++-------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/doc/fluid/howto/optimization/timeline_cn.md b/doc/fluid/howto/optimization/timeline_cn.md index 5d061e1c00..5a74484b30 100644 --- a/doc/fluid/howto/optimization/timeline_cn.md +++ b/doc/fluid/howto/optimization/timeline_cn.md @@ -1,21 +1,27 @@ # 如何使用timeline工具做性能分析 -1. 在训练的主循环外加上`with profiler.profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 +1. 
在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.start_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 **提示:** 请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。 ```python - with profiler.profiler('All', 'total', '/tmp/profile') as prof: - for pass_id in range(pass_num): - for batch_id, data in enumerate(train_reader()): - exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[]) + for pass_id in range(pass_num): + for batch_id, data in enumerate(train_reader()): + if pass_id == 0 and batch_id == 5: + profiler.start_profiler("All") + elif pass_id == 0 and batch_id == 10: + profiler.stop_profiler("total", "/tmp/profile") + exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[]) ... ``` 1. 运行`python paddle/tools/timeline.py`来处理`/tmp/profile`,这个程序默认会生成一个`/tmp/timeline`文件,你也可以用命令行参数来修改这个路径,请参考[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py)。 +```python +python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline +``` 1. 打开chrome浏览器,访问,用`load`按钮来加载生成的`timeline`文件。 diff --git a/doc/fluid/howto/optimization/timeline_en.md b/doc/fluid/howto/optimization/timeline_en.md index 96481ae2a6..1c56f607a0 100644 --- a/doc/fluid/howto/optimization/timeline_en.md +++ b/doc/fluid/howto/optimization/timeline_en.md @@ -1,15 +1,17 @@ # how to use timeline tool to do profile -1. Add `with profiler.profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. +1. Add `profiler.start_profiler(...)`和`profiler.start_profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. ```python - with profiler.profiler('All', 'total', '/tmp/profile') as prof: - for pass_id in range(pass_num): - for batch_id, data in enumerate(train_reader()): - exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[], - use_program_cache=True) + for pass_id in range(pass_num): + for batch_id, data in enumerate(train_reader()): + if pass_id == 0 and batch_id == 5: + profiler.start_profiler("All") + elif pass_id == 0 and batch_id == 10: + profiler.stop_profiler("total", "/tmp/profile") + exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[]) ... ``` @@ -17,6 +19,10 @@ file `/tmp/timeline` by default. You can change the path by cmd parameter, please take a look at [timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py) for details. +```python +python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline +``` + 1. Open chrome and visit , use `load` button to load the generated `timeline` file. 
![chrome tracing](./tracing.jpeg) From 486345551d802873370bf6ad64a4716ddfd087f6 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 31 Jul 2018 16:43:00 +0800 Subject: [PATCH 04/20] clean --- .../operators/distributed/send_recv.proto | 84 ------------------- 1 file changed, 84 deletions(-) delete mode 100644 paddle/fluid/operators/distributed/send_recv.proto diff --git a/paddle/fluid/operators/distributed/send_recv.proto b/paddle/fluid/operators/distributed/send_recv.proto deleted file mode 100644 index 2b3536b33a..0000000000 --- a/paddle/fluid/operators/distributed/send_recv.proto +++ /dev/null @@ -1,84 +0,0 @@ - -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under -the Apache License, Version 2.0 (the "License"); you may not use this file -except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -syntax = "proto3"; -package sendrecv; - -option cc_generic_services = false; - -service SendRecvService { - // For parameter server round-robin like hashing, do not split tensors. - // Send and recv only one tensor - // TODO(typhoonzero): add streaming API - rpc SendVariable(VariableMessage) returns (VoidMessage) {} - // Argument VariableMessage for GetVariable should only contain varname. - rpc GetVariable(VariableMessage) returns (VariableMessage) {} - // pre-fetch variable by given variable name and Ids - rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {} - - rpc CheckpointNotify(VariableMessage) returns (VoidMessage) {} -} - -// VariableMessage is serialized paddle variable message. -// It can be: -// LoDTensor -// SelectedRows -enum VarType { - LOD_TENSOR = 0; - SELECTED_ROWS = 1; - NCCL_ID = 2; -} - -// NOTICE(gongwb):don't modify this proto if you are not -// not familar with how we serialize in sendrecvop_utils.h -// and deserilize it in variable_response.h. -message VariableMessage { - enum Type { - // Pod Types - BOOL = 0; - INT16 = 1; - INT32 = 2; - INT64 = 3; - FP16 = 4; - FP32 = 5; - FP64 = 6; - } - - message LodData { repeated int64 lod_data = 1; } - string varname = 1; - // TODO(Yancey1989): reference framework::proto::VarDesc::VarType - VarType type = 2; - // bool persistable is not needed for sending. - // tensor info: - Type data_type = 3; - repeated int64 dims = 4; - - // lod details: - int64 lod_level = 5; - repeated LodData lod = 6; - // selected_rows height, aka. original dim0 - int64 slr_height = 7; - // tensor data - bytes serialized = 8; - // selected_rows data - bytes rows = 9; - // Look up table block execution output variable name. - string out_varname = 10; - // If 1, the ps server will start profiling, the ps - // server stops profiling and generates a profile to /tmp/profile_ps_* - // when profile switches from 1 to 2. 
- int64 profile = 11; -} - -message VoidMessage {} From b1af7e5d9b62bc5696833e437bf641076e629875 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Jul 2018 18:55:58 +0800 Subject: [PATCH 05/20] Add unittests for lookup_table_op --- paddle/fluid/operators/lookup_table_op.cu | 5 +- paddle/fluid/operators/lookup_table_op.h | 7 +- .../tests/unittests/test_lookup_table_op.py | 78 ++++++++++++++++--- 3 files changed, 76 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index e8462129fd..74823dab09 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -139,7 +139,10 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { auto *d_table_data = d_table_value->data(); auto *d_output_data = d_output->data(); - PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); + auto d_output_dims = d_output->dims(); + PADDLE_ENFORCE_EQ( + d_table_value->dims(), + framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index ccf77af6a6..f5c10ced83 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -127,7 +127,10 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_output_data = d_output->data(); auto *d_table_data = d_table_value->data(); - PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); + auto d_output_dims = d_output->dims(); + PADDLE_ENFORCE_EQ( + d_table_value->dims(), + framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1)); memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { auto *ids = context.Input("Ids"); @@ -137,7 +140,7 @@ class LookupTableGradKernel : public framework::OpKernel { auto *ids_data = ids->data(); int N = table_dim[0]; - int D = d_output->dims()[1]; + int D = table_dim[1]; auto *d_output_data = d_output->data(); auto *d_table_data = d_table->mutable_data(context.GetPlace()); diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index e16ab1d15f..ad0d555198 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -35,6 +35,22 @@ class TestLookupTableOp(OpTest): self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) +class TestLookupTableOpWithTensorIds(OpTest): + def setUp(self): + self.op_type = "lookup_table" + table = np.random.random((17, 31)).astype("float32") + ids = np.random.randint( + low=0, high=17, size=(2, 4, 5, 1)).astype("int64") + self.inputs = {'W': table, 'Ids': ids} + self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) + + class TestLookupTableOpWithPadding(TestLookupTableOp): def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) @@ -44,21 +60,34 @@ class TestLookupTableOpWithPadding(TestLookupTableOp): self.check_output() def test_check_grad(self): - # Since paddings are not trainable and fixed in forward, the gradient of + # Since paddings are not trainable and fixed in forward, the gradient of # paddings makes no sense and we don't test the gradient here. 
pass -class TestLookupTableWIsSelectedRows(OpTest): - def check_with_place(self, place): - scope = core.Scope() +class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds): + def test_check_output(self): + ids = self.inputs['Ids'] + flatten_idx = ids.flatten() + padding_idx = np.random.choice(flatten_idx, 1)[0] + self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31) + self.attrs = {'padding_idx': long(padding_idx)} + self.check_output() + + def test_check_grad(self): + # Since paddings are not trainable and fixed in forward, the gradient of + # paddings makes no sense and we don't test the gradient here. + pass - # create and initialize Id Variable + +class TestLookupTableWIsSelectedRows(OpTest): + def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([[0], [4], [3], [5]]).astype("int64") ids_tensor.set(ids_array, place) + return ids_array - # create and initialize W Variable + def prepare_w(self, scope, place): rows = [0, 1, 2, 3, 4, 5, 6] row_numel = 12 @@ -71,8 +100,22 @@ class TestLookupTableWIsSelectedRows(OpTest): w_tensor = w_selected_rows.get_tensor() w_tensor.set(w_array, place) - # create Out Variable - out_tensor = scope.var('Out').get_tensor() + def create_out_tensor(self, scope, place): + return scope.var('Out').get_tensor() + + def check_result(self, ids_array, result_array): + # all(): return True if all elements of the iterable are true (or if the iterable is empty) + for idx, row in enumerate(ids_array): + assert (row[0] == result_array[idx]).all() + + def check_with_place(self, place): + scope = core.Scope() + + ids_array = self.prepare_ids(scope, place) + + self.prepare_w(scope, place) + + out_tensor = self.create_out_tensor(scope, place) # create and run lookup_table operator lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out') @@ -80,9 +123,8 @@ class TestLookupTableWIsSelectedRows(OpTest): # get result from Out result_array = np.array(out_tensor) - # all(): return True if all elements of the iterable are true (or if the iterable is empty) - for idx, row in enumerate(ids_array): - assert (row[0] == result_array[idx]).all() + + self.check_result(ids_array, result_array) def test_w_is_selected_rows(self): places = [core.CPUPlace()] @@ -91,5 +133,19 @@ class TestLookupTableWIsSelectedRows(OpTest): self.check_with_place(place) +class TestLookupTableWithTensorIdsWIsSelectedRows( + TestLookupTableWIsSelectedRows): + def prepare_ids(self, scope, place): + ids_tensor = scope.var('Ids').get_tensor() + ids_array = np.random.randint( + low=0, high=6, size=(2, 4, 3, 1)).astype("int64") + ids_tensor.set(ids_array, place) + return ids_array + + def check_result(self, ids_array, result_array): + for idx, row in np.ndenumerate(ids_array): + assert (row == result_array[idx]).all() + + if __name__ == "__main__": unittest.main() From b314a69523195d1178f79a8854220ac395072adc Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Jul 2018 19:53:59 +0800 Subject: [PATCH 06/20] make softmax supporting tensors --- paddle/fluid/operators/softmax_op.cc | 26 +++++++++++++------------- paddle/fluid/operators/softmax_op.h | 22 ++++++++++++++++++++-- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 31a7458f63..f2821cd1e8 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -37,10 +37,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { 
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of SoftmaxOp should not be null."); - auto x_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE(x_dims.size() == 2UL, - "The input of softmax op must be a matrix."); - ctx->SetOutputDim("Out", x_dims); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); ctx->ShareLoD("X", /*->*/ "Out"); } @@ -81,8 +78,8 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("X", - "The input tensor of softmax. " - "2-D with shape [batch_size, input_feature_dimensions]."); + "The input tensor of softmax, " + "whose last dimension is the input_feature_dimensions."); AddOutput("Out", "The normalized values with the same shape as X.") .Reuse("X"); AddAttr( @@ -105,20 +102,23 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Softmax Operator. -The input of the softmax operator is a 2-D tensor with shape N x K (N is the -batch_size, K is the dimension of input feature). The output tensor has the -same shape as the input tensor. +The input of the softmax operator is a tensor of any rank. The output tensor +has the same shape as the input. -For each row of the input tensor, the softmax operator squashes the -K-dimensional vector of arbitrary real values to a K-dimensional vector of real -values in the range [0, 1] that add up to 1. +The input tensor will first be logically flattened to a 2-D matrix. The matrix's +second dimension(row length) is as same as the last dimension of the input +tensor, and the first dimension(column length) is the product of all other +dimensions of the input tensor. For each row of the matrix, the softmax operator +squashes the K-dimensional(K is the width of the matrix, which is also the size +of the input tensor's last dimension) vector of arbitrary real values to a +K-dimensional vector of real values in the range [0, 1] that add up to 1. It computes the exponential of the given dimension and the sum of exponential values of all the other dimensions in the K-dimensional vector input. Then the ratio of the exponential of the given dimension and the sum of exponential values of all the other dimensions is the output of the softmax operator. -For each row $i$ and each column $j$ in Input(X), we have: +For each row $i$ and each column $j$ in the matrix, we have: $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$ )DOC"); diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h index 600da45a0b..34c35ee421 100644 --- a/paddle/fluid/operators/softmax_op.h +++ b/paddle/fluid/operators/softmax_op.h @@ -31,8 +31,16 @@ class SoftmaxKernel : public framework::OpKernel { // allocate memory on device. Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_x; + framework::LoDTensor flattened_out; + flattened_x.ShareDataWith(*X); + flattened_out.ShareDataWith(*Out); + math::SoftmaxFunctor()( - context.template device_context(), X, Out); + context.template device_context(), &flattened_x, + &flattened_out); } }; @@ -47,8 +55,18 @@ class SoftmaxGradKernel : public framework::OpKernel { // allocate memory on device. 
dX->mutable_data(context.GetPlace()); + auto dims = Out->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_out; + framework::LoDTensor flattened_d_out; + framework::LoDTensor flattened_d_x; + flattened_out.ShareDataWith(*Out); + flattened_d_out.ShareDataWith(*dOut); + flattened_d_x.ShareDataWith(*dX); + math::SoftmaxGradFunctor()( - context.template device_context(), Out, dOut, dX); + context.template device_context(), &flattened_out, + &flattened_d_out, &flattened_d_x); } }; From f7bd0b227b6cbf34947f95430449d259ef4635b8 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 31 Jul 2018 20:07:03 +0800 Subject: [PATCH 07/20] Add unittests for softmax_op --- paddle/fluid/operators/softmax_op.h | 10 ++--- .../fluid/tests/unittests/test_softmax_op.py | 42 ++++++++++++++++++- 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h index 34c35ee421..1205bd0587 100644 --- a/paddle/fluid/operators/softmax_op.h +++ b/paddle/fluid/operators/softmax_op.h @@ -35,8 +35,8 @@ class SoftmaxKernel : public framework::OpKernel { auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); framework::LoDTensor flattened_x; framework::LoDTensor flattened_out; - flattened_x.ShareDataWith(*X); - flattened_out.ShareDataWith(*Out); + flattened_x.ShareDataWith(*X).Resize(flattened_dims); + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); math::SoftmaxFunctor()( context.template device_context(), &flattened_x, @@ -60,9 +60,9 @@ class SoftmaxGradKernel : public framework::OpKernel { framework::LoDTensor flattened_out; framework::LoDTensor flattened_d_out; framework::LoDTensor flattened_d_x; - flattened_out.ShareDataWith(*Out); - flattened_d_out.ShareDataWith(*dOut); - flattened_d_x.ShareDataWith(*dX); + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims); + flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims); math::SoftmaxGradFunctor()( context.template device_context(), &flattened_out, diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 0ab581cfb0..70ad05597c 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -26,15 +26,22 @@ def stable_softmax(x): class TestSoftmaxOp(OpTest): + def get_x_shape(self): + return [10, 10] + def setUp(self): self.op_type = "softmax" self.use_cudnn = False self.use_mkldnn = False self.dtype = np.float32 self.init_kernel_type() + self.shape = self.get_x_shape() + + x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) + out = np.apply_along_axis(stable_softmax, 1, + x.reshape([-1, self.shape[-1]])) + out = out.reshape(self.shape) - x = np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype) - out = np.apply_along_axis(stable_softmax, 1, x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} self.attrs = { @@ -63,6 +70,11 @@ class TestSoftmaxOp(OpTest): self.check_grad(["X"], "Out", max_relative_error=0.01) +class TestSoftmaxOp2(TestSoftmaxOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestSoftmaxCUDNNOp(TestSoftmaxOp): @@ -70,6 +82,13 @@ class TestSoftmaxCUDNNOp(TestSoftmaxOp): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not 
compiled with CUDA") +class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestSoftmaxFP16Op(TestSoftmaxOp): @@ -83,6 +102,13 @@ class TestSoftmaxFP16Op(TestSoftmaxOp): self.check_output_with_place(place, atol=1e-3) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestSoftmaxFP16Op2(TestSoftmaxFP16Op): + def get_x_shape(self): + return [2, 3, 4, 5] + + @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): @@ -97,10 +123,22 @@ class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): self.check_output_with_place(place, atol=1e-3) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + class TestSoftmaxMKLDNNOp(TestSoftmaxOp): def init_kernel_type(self): self.use_mkldnn = True +class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + if __name__ == "__main__": unittest.main() From dc111d3476c2a6e6cb8147011bf946b43719719d Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 1 Aug 2018 10:30:40 +0800 Subject: [PATCH 08/20] update softmax_cudnn_op --- paddle/fluid/operators/softmax_cudnn_op.cu.cc | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.cc b/paddle/fluid/operators/softmax_cudnn_op.cu.cc index 5596fa0648..2bdb23e999 100644 --- a/paddle/fluid/operators/softmax_cudnn_op.cu.cc +++ b/paddle/fluid/operators/softmax_cudnn_op.cu.cc @@ -30,8 +30,16 @@ class SoftmaxCUDNNKernel : public framework::OpKernel { // allocate memory on device. Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_x; + framework::LoDTensor flattened_out; + flattened_x.ShareDataWith(*X).Resize(flattened_dims); + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + math::SoftmaxCUDNNFunctor()( - context.template device_context(), X, Out); + context.template device_context(), + &flattened_x, &flattened_out); } }; @@ -46,9 +54,18 @@ class SoftmaxGradCUDNNKernel : public framework::OpKernel { // allocate memory on device. 
dX->mutable_data(context.GetPlace()); + auto dims = Out->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_out; + framework::LoDTensor flattened_d_out; + framework::LoDTensor flattened_d_x; + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims); + flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims); + math::SoftmaxGradCUDNNFunctor()( - context.template device_context(), Out, - dOut, dX); + context.template device_context(), + &flattened_out, &flattened_d_out, &flattened_d_x); } }; From e7d8e16a669dccde3114b0aaa15208e41e373338 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 1 Aug 2018 11:21:42 +0800 Subject: [PATCH 09/20] update softmax_mkldnn_op --- paddle/fluid/operators/softmax_mkldnn_op.cc | 71 +++++++++++++-------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index 6668e6b9e9..01819f53e3 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -26,9 +26,9 @@ using paddle::platform::MKLDNNMemDesc; using mkldnn::memory; // Note: paddle has also "memory" namespace using mkldnn::primitive; -using mkldnn::softmax_forward; -using mkldnn::softmax_backward; using mkldnn::prop_kind; +using mkldnn::softmax_backward; +using mkldnn::softmax_forward; using mkldnn::stream; using platform::to_void_cast; @@ -113,17 +113,27 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { auto mkldnn_engine = dev_ctx.GetEngine(); const Tensor* input = ctx.Input("X"); Tensor* output = ctx.Output("Out"); - PADDLE_ENFORCE(input->dims().size() == 2UL, - "The input of softmax op must be a 2D matrix."); - const T* input_data = input->data(); - // allocate memory for output - T* output_data = output->mutable_data(ctx.GetPlace()); - std::vector src_tz = paddle::framework::vectorize2int(input->dims()); - std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); - // MKL-DNN does support softmax over selected axis. Having 2D Tensor, - // we will make normalization after final eg. axis: 1 - PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])), - "Softmax input and output dimensions should match"); + PADDLE_ENFORCE_EQ( + input->dims(), output->dims(), + "The shape of softmax's input and output must be identical."); + + // make sure 'output' holds memory, which will be shared by + // 'flattened_output' later. 
+ output->mutable_data(ctx.GetPlace()); + + // flatten input and output to 2-D matrixs + auto dims = input->dims(); // input and output share the same shape + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::Tensor flattened_input; + framework::Tensor flattened_output; + flattened_input.ShareDataWith(*input).Resize(flattened_dims); + flattened_output.ShareDataWith(*output).Resize(flattened_dims); + + const T* input_data = flattened_input.data(); + T* output_data = flattened_output.mutable_data(ctx.GetPlace()); + + std::vector src_tz = paddle::framework::vectorize2int(flattened_dims); + std::vector dst_tz = src_tz; // Same memory descriptor to be used for input and output memory::dims softmax_tz = {src_tz[0], src_tz[1]}; // Generate keys for storing/retriving primitives for this operator @@ -174,23 +184,34 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto mkldnn_engine = dev_ctx.GetEngine(); const Tensor* output = ctx.Input("Out"); - const T* dst_data = output->data(); - auto* dout = ctx.template Input(framework::GradVarName("Out")); - const auto* diff_dst_ptr = dout->template data(); - auto* dx = ctx.template Output(framework::GradVarName("X")); - T* diff_src_ptr = dx->template mutable_data(ctx.GetPlace()); - std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); + PADDLE_ENFORCE_EQ( + dout->dims(), dx->dims(), + "The shape of softmax_grad's input and output must be identical."); + + // make sure 'dx' holds memory, which will be shared by 'flattened_dx' + // later. + dx->template mutable_data(ctx.GetPlace()); + + auto dims = dout->dims(); // input and output share the same shape + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::Tensor flattened_output; + framework::Tensor flattened_dout; + framework::Tensor flattened_dx; + flattened_output.ShareDataWith(*output).Resize(flattened_dims); + flattened_dout.ShareDataWith(*dout).Resize(flattened_dims); + flattened_dx.ShareDataWith(*dx).Resize(flattened_dims); + + const T* dst_data = flattened_output.data(); + const T* diff_dst_ptr = flattened_dout.template data(); + T* diff_src_ptr = flattened_dx.template mutable_data(ctx.GetPlace()); + + std::vector dst_tz = paddle::framework::vectorize2int(flattened_dims); std::vector src_tz(dst_tz); - PADDLE_ENFORCE(output->dims().size() == 2UL, - "The input of softmax op must be a 2D matrix."); - // MKL-DNN does support softmax over selected axis. Having 2D Tensor, - // we will make normalization after final eg. 
axis: 1 - PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])), - "Softmax input and output dimensions should match"); + // Same memory descriptor to be used for input and output memory::dims softmax_tz = {src_tz[0], src_tz[1]}; // Currently only supports NC data format From 23aebf0ea7a05b104894b0525525ac8748a06e3d Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 1 Aug 2018 14:01:24 +0800 Subject: [PATCH 10/20] update softmax layer comment --- python/paddle/fluid/layers/nn.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 058acd4a50..12e7170fc3 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1313,13 +1313,16 @@ def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None): """ - The input of the softmax layer is a 2-D tensor with shape N x K (N is the - batch_size, K is the dimension of input feature). The output tensor has the - same shape as the input tensor. + The input of the softmax operator is a tensor of any rank. The output tensor + has the same shape as the input. - For each row of the input tensor, the softmax operator squashes the - K-dimensional vector of arbitrary real values to a K-dimensional vector of real - values in the range [0, 1] that add up to 1. + The input tensor will first be logically flattened to a 2-D matrix. The matrix's + second dimension(row length) is as same as the last dimension of the input + tensor, and the first dimension(column length) is the product of all other + dimensions of the input tensor. For each row of the matrix, the softmax operator + squashes the K-dimensional(K is the width of the matrix, which is also the size + of the input tensor's last dimension) vector of arbitrary real values to a + K-dimensional vector of real values in the range [0, 1] that add up to 1. It computes the exponential of the given dimension and the sum of exponential values of all the other dimensions in the K-dimensional vector input. @@ -1327,7 +1330,7 @@ def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None): exponential values of all the other dimensions is the output of the softmax operator. - For each row :math:`i` and each column :math:`j` in Input(X), we have: + For each row :math:`i` and each column :math:`j` in the matrix, we have: .. math:: From a3f9d6a38cc1587a7e331c33ffda0c0709406a66 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 5 Aug 2018 19:33:06 +0800 Subject: [PATCH 11/20] optimize profiler --- paddle/fluid/CMakeLists.txt | 2 +- paddle/fluid/framework/operator.cc | 9 ++++++--- paddle/fluid/operators/feed_op.cc | 1 - paddle/fluid/operators/fetch_barrier_op.cc | 6 ------ paddle/fluid/operators/fetch_op.cc | 3 --- paddle/fluid/operators/load_op.cc | 3 --- paddle/fluid/operators/recv_op.cc | 2 -- paddle/fluid/operators/send_barrier_op.cc | 5 ----- paddle/fluid/operators/send_op.cc | 3 --- 9 files changed, 7 insertions(+), 27 deletions(-) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index d274d96c29..e2e26fc5d4 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -6,4 +6,4 @@ add_subdirectory(pybind) add_subdirectory(string) add_subdirectory(recordio) # NOTE: please add subdirectory inference at last. 
-add_subdirectory(inference) +#add_subdirectory(inference) diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index cdac00739b..18ec226801 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -136,6 +136,12 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { platform::SetDeviceId(dev_id); #endif } + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto* dev_ctx = pool.Get(place); + + // For profiling, don't move out of this function because that will result + // in the failure of multi-GPU profiling. + platform::RecordEvent record_event(Type(), dev_ctx); RunImpl(scope, place); VLOG(10) << "+ " << DebugStringEx(&scope); } @@ -639,9 +645,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope, platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto* dev_ctx = pool.Get(place); - // For profiling, don't move out of this function because that will result - // in the failure of multi-GPU profiling. - platform::RecordEvent record_event(Type(), dev_ctx); // check if op[type] has kernel registered. auto& all_op_kernels = AllOpKernels(); auto kernels_iter = all_op_kernels.find(type_); diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index bcb3e63ed7..dc7ef66495 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -31,7 +31,6 @@ class FeedOp : public framework::OperatorBase { const platform::Place &place) const override { // get device context from pool auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place); - platform::RecordEvent record_event(Type(), dev_ctx); auto feed_var_name = Input("X"); auto *feed_var = scope.FindVar(feed_var_name); diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/fetch_barrier_op.cc index 680fde19ee..d9cd956dfd 100644 --- a/paddle/fluid/operators/fetch_barrier_op.cc +++ b/paddle/fluid/operators/fetch_barrier_op.cc @@ -36,12 +36,6 @@ class FetchBarrierOp : public framework::OperatorBase { void RunImpl(const framework::Scope& scope, const platform::Place& place) const override { std::vector eps = Attr>("endpoints"); - - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& ctx = *pool.Get(place); - // For profiling - platform::RecordEvent record_event(Type(), &ctx); - distributed::RPCClient* rpc_client = distributed::RPCClient::GetInstance(); diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 1640a2a22c..c197b45e81 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -30,9 +30,6 @@ class FetchOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - platform::RecordEvent record_event(Type(), pool.Get(place)); - auto fetch_var_name = Input("X"); auto *fetch_var = scope.FindVar(fetch_var_name); PADDLE_ENFORCE(fetch_var != nullptr, diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index ac35cf0b89..27e26cb1b5 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -31,9 +31,6 @@ class LoadOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { - auto *dev_ctx = 
platform::DeviceContextPool::Instance().Get(place); - platform::RecordEvent record_event(Type(), dev_ctx); - // FIXME(yuyang18): We save variable to local file now, but we should change // it to save an output stream. auto filename = Attr("file_path"); diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index 1ba6840149..4a6ce938a5 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -40,8 +40,6 @@ class RecvOp : public framework::OperatorBase { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); - // For profiling - platform::RecordEvent record_event(Type(), &ctx); distributed::RPCClient* rpc_client = distributed::RPCClient::GetInstance(); diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index d7f8e994af..1866a86048 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -39,11 +39,6 @@ class SendBarrierOp : public framework::OperatorBase { std::vector eps = Attr>("endpoints"); bool sync_mode = Attr("sync_mode"); - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& ctx = *pool.Get(place); - // For profiling - platform::RecordEvent record_event(Type(), &ctx); - distributed::RPCClient* rpc_client = distributed::RPCClient::GetInstance(); diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 829f310d42..3cd42f2d05 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -42,9 +42,6 @@ class SendOp : public framework::OperatorBase { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); - // For profiling - platform::RecordEvent record_event(Type(), &ctx); - distributed::RPCClient* rpc_client = distributed::RPCClient::GetInstance(); From d553e2ff3ff1a27edc18a9afed3c04875c2d9052 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 5 Aug 2018 20:09:08 +0800 Subject: [PATCH 12/20] revert inference --- paddle/fluid/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index e2e26fc5d4..d274d96c29 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -6,4 +6,4 @@ add_subdirectory(pybind) add_subdirectory(string) add_subdirectory(recordio) # NOTE: please add subdirectory inference at last. -#add_subdirectory(inference) +add_subdirectory(inference) From 19f40dab03961caece3b49591a02475b373afbe6 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Sun, 5 Aug 2018 21:06:45 +0800 Subject: [PATCH 13/20] polish doc --- doc/fluid/howto/optimization/timeline_cn.md | 2 +- doc/fluid/howto/optimization/timeline_en.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/fluid/howto/optimization/timeline_cn.md b/doc/fluid/howto/optimization/timeline_cn.md index 5a74484b30..faf39f276d 100644 --- a/doc/fluid/howto/optimization/timeline_cn.md +++ b/doc/fluid/howto/optimization/timeline_cn.md @@ -1,6 +1,6 @@ # 如何使用timeline工具做性能分析 -1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.start_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 +1. 
在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。 **提示:** 请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。 diff --git a/doc/fluid/howto/optimization/timeline_en.md b/doc/fluid/howto/optimization/timeline_en.md index 1c56f607a0..6f963c6b4d 100644 --- a/doc/fluid/howto/optimization/timeline_en.md +++ b/doc/fluid/howto/optimization/timeline_en.md @@ -1,6 +1,6 @@ # how to use timeline tool to do profile -1. Add `profiler.start_profiler(...)`和`profiler.start_profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. +1. Add `profiler.start_profiler(...)`和`profiler.stop_profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number. ```python for pass_id in range(pass_num): From 4c5bcd78597e495c08d883322f231cb99f43f8fd Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sun, 5 Aug 2018 22:16:02 +0800 Subject: [PATCH 14/20] add guard to profiler --- paddle/fluid/platform/profiler.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 01de9d7041..75cae95f83 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -176,6 +176,7 @@ void PopEvent(const std::string& name, const DeviceContext* dev_ctx) { RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx) : is_enabled_(false), start_ns_(PosixInNsec()) { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; is_enabled_ = true; dev_ctx_ = dev_ctx; @@ -186,6 +187,7 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx) } RecordEvent::~RecordEvent() { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled || !is_enabled_) return; DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { @@ -198,6 +200,7 @@ RecordEvent::~RecordEvent() { RecordBlock::RecordBlock(int block_id) : is_enabled_(false), start_ns_(PosixInNsec()) { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; is_enabled_ = true; SetCurBlock(block_id); @@ -205,6 +208,7 @@ RecordBlock::RecordBlock(int block_id) } RecordBlock::~RecordBlock() { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled || !is_enabled_) return; DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { @@ -217,11 +221,13 @@ RecordBlock::~RecordBlock() { } RecordThread::RecordThread(int thread_id) { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; SetCurThread(thread_id); } RecordThread::~RecordThread() { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; ClearCurThread(); } From 903b2c01626815fa77b56a5fae9990cac17ff162 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 6 Aug 2018 10:15:32 +0800 Subject: [PATCH 15/20] optimize code --- paddle/fluid/CMakeLists.txt | 2 +- paddle/fluid/framework/operator.cc | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index d274d96c29..e2e26fc5d4 100644 --- a/paddle/fluid/CMakeLists.txt +++ 
b/paddle/fluid/CMakeLists.txt @@ -6,4 +6,4 @@ add_subdirectory(pybind) add_subdirectory(string) add_subdirectory(recordio) # NOTE: please add subdirectory inference at last. -add_subdirectory(inference) +#add_subdirectory(inference) diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 18ec226801..0c8acf71bf 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -137,11 +137,7 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { #endif } platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto* dev_ctx = pool.Get(place); - - // For profiling, don't move out of this function because that will result - // in the failure of multi-GPU profiling. - platform::RecordEvent record_event(Type(), dev_ctx); + platform::RecordEvent record_event(Type(), pool.Get(place)); RunImpl(scope, place); VLOG(10) << "+ " << DebugStringEx(&scope); } From 52576c5f38e90fe05caf4bc85e07606565a39e22 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 6 Aug 2018 10:32:22 +0800 Subject: [PATCH 16/20] revert inference --- paddle/fluid/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index e2e26fc5d4..d274d96c29 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -6,4 +6,4 @@ add_subdirectory(pybind) add_subdirectory(string) add_subdirectory(recordio) # NOTE: please add subdirectory inference at last. -#add_subdirectory(inference) +add_subdirectory(inference) From 3cea440060fb99954b928afb29b0d4d46733e8fd Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Mon, 6 Aug 2018 11:11:11 +0800 Subject: [PATCH 17/20] enforce test_parallel_executor_transformer and test_image_classification_resnet to run in serial (#12545) --- python/paddle/fluid/tests/unittests/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index c8e881a672..a6a911721d 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -50,6 +50,8 @@ list(REMOVE_ITEM TEST_OPS test_parallel_executor_crf) list(REMOVE_ITEM TEST_OPS test_parallel_executor_fetch_feed) list(REMOVE_ITEM TEST_OPS test_dist_se_resnext) list(REMOVE_ITEM TEST_OPS test_dist_transformer) +list(REMOVE_ITEM TEST_OPS test_parallel_executor_transformer) +list(REMOVE_ITEM TEST_OPS test_image_classification_resnet) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -64,3 +66,5 @@ py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SE py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL) py_test_modules(test_dist_transformer MODULES test_dist_transformer SERIAL) py_test_modules(test_dist_se_resnext MODULES test_dist_se_resnext SERIAL) +py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL) +py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL) From 3c5e15de03df36356c79c3d4573b31ad5664ad86 Mon Sep 17 00:00:00 2001 From: superjomn Date: Mon, 6 Aug 2018 08:00:40 +0000 Subject: [PATCH 18/20] disable anakin test --- paddle/fluid/inference/api/CMakeLists.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/inference/api/CMakeLists.txt 
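Stepping back to [PATCH 15/20]: constructing the event directly from pool.Get(place) inside OperatorBase::Run gives every operator exactly one profiling scope around RunImpl, which is why the per-operator RecordEvent instances in recv_op, send_op and send_barrier_op could be removed earlier in the series. The following is a rough sketch of that shape only, using stand-in types rather than the real framework classes.

```cpp
#include <iostream>
#include <string>
#include <utility>

// Minimal stand-ins; only the control flow of Run() matters in this sketch.
struct Scope {};
struct Place {};
struct DeviceContext {};

struct DeviceContextPool {
  static DeviceContextPool& Instance() {
    static DeviceContextPool pool;
    return pool;
  }
  DeviceContext* Get(const Place&) {
    static DeviceContext ctx;
    return &ctx;
  }
};

// Simplified scope event: opens a profiler range on construction and closes it
// on destruction, so it brackets everything RunImpl() does.
class RecordEvent {
 public:
  RecordEvent(std::string name, const DeviceContext*) : name_(std::move(name)) {
    std::cout << "event begin: " << name_ << "\n";
  }
  ~RecordEvent() { std::cout << "event end:   " << name_ << "\n"; }

 private:
  std::string name_;
};

class OperatorBase {
 public:
  virtual ~OperatorBase() = default;
  virtual std::string Type() const = 0;

  void Run(const Scope& scope, const Place& place) {
    auto& pool = DeviceContextPool::Instance();
    // One event per operator execution, created before RunImpl() so the whole
    // op body is attributed to this op; per-op events inside RunImpl() become
    // unnecessary.
    RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  }

 protected:
  virtual void RunImpl(const Scope& scope, const Place& place) = 0;
};

// Illustrative concrete op, named after the ops touched in this series.
class SendOp : public OperatorBase {
 public:
  std::string Type() const override { return "send"; }

 protected:
  void RunImpl(const Scope&, const Place&) override {
    std::cout << "  ...do the actual send here...\n";
  }
};

int main() {
  Scope scope;
  Place place;
  SendOp op;
  op.Run(scope, place);  // prints begin/end around the op body
  return 0;
}
```

A side benefit of the RAII form is that the range is closed on every exit path from Run, including exceptions, without an explicit end call.
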
b/paddle/fluid/inference/api/CMakeLists.txt index 259d79bedb..d9f6f270f9 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -74,9 +74,9 @@ if (WITH_ANAKIN) # only needed in CI target_link_libraries(inference_anakin_api anakin anakin_saber_common) target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common) if (WITH_TESTING) - cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc - ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin - DEPS inference_anakin_api_shared) - target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS}) + #cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc #ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin #DEPS inference_anakin_api_shared) + #target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS}) endif(WITH_TESTING) endif()
From ebe19206261a9e8e683869817525d23ff546e37b Mon Sep 17 00:00:00 2001 From: superjomn Date: Mon, 6 Aug 2018 08:01:18 +0000 Subject: [PATCH 19/20] add comment --- paddle/fluid/inference/api/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index d9f6f270f9..08d0f493ab 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -74,6 +74,7 @@ if (WITH_ANAKIN) # only needed in CI target_link_libraries(inference_anakin_api anakin anakin_saber_common) target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common) if (WITH_TESTING) + # this test is unstable, disable it first. #cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc #ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin #DEPS inference_anakin_api_shared) #target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS}) endif(WITH_TESTING) endif()
From 954d680b4022be31108fde1ef12e84b52e6fadbd Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 6 Aug 2018 19:49:41 +0800 Subject: [PATCH 20/20] fix test_parallel_do.py --- paddle/fluid/platform/profiler.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index fe811e0250..7c8d8a5964 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -110,6 +110,8 @@ Event::Event(EventType type, std::string name, uint32_t thread_id, has_cuda_ = dev_ctx ? platform::is_gpu_place(dev_ctx->GetPlace()) : false; if (has_cuda_) { auto* cuda_dev_ctx = static_cast<const CUDADeviceContext*>(dev_ctx); + PADDLE_ENFORCE(cudaSetDevice( + boost::get<platform::CUDAPlace>(cuda_dev_ctx->GetPlace()).device)); PADDLE_ENFORCE(cudaGetDevice(&device_)); PADDLE_ENFORCE(cudaEventCreate(&event_)); auto stream = cuda_dev_ctx->stream();
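The two lines added by [PATCH 20/20] call cudaSetDevice before cudaGetDevice and cudaEventCreate, so the profiling event is created on the device that owns cuda_dev_ctx's stream rather than on whatever device happens to be current on the calling thread; cudaEventRecord generally requires the event and the stream to belong to the same device, and that mismatch is the multi-GPU failure the test_parallel_do fix targets. A standalone sketch of the same device-scoping idea follows, assuming only the CUDA runtime API; the DeviceGuard helper and CUDA_CHECK macro are illustrative rather than Paddle code, and unlike the patch the guard also restores the previous device.

```cpp
#include <cuda_runtime.h>
#include <cstdio>

// Illustrative error check; a real codebase would have its own enforce macro.
#define CUDA_CHECK(expr)                                          \
  do {                                                            \
    cudaError_t err = (expr);                                     \
    if (err != cudaSuccess) {                                     \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",            \
                   cudaGetErrorString(err), __FILE__, __LINE__);  \
    }                                                             \
  } while (0)

// Temporarily switch the calling thread to a given device, then restore it.
class DeviceGuard {
 public:
  explicit DeviceGuard(int device) : device_(device) {
    CUDA_CHECK(cudaGetDevice(&previous_));
    if (device_ != previous_) CUDA_CHECK(cudaSetDevice(device_));
  }
  ~DeviceGuard() {
    if (device_ != previous_) CUDA_CHECK(cudaSetDevice(previous_));
  }

 private:
  int previous_ = 0;
  int device_ = 0;
};

// Create a timing event on the device that owns the target stream, so a later
// cudaEventRecord(event, stream) on that device is valid.
cudaEvent_t MakeEventOn(int stream_device) {
  DeviceGuard guard(stream_device);
  cudaEvent_t event = nullptr;
  CUDA_CHECK(cudaEventCreate(&event));
  return event;
}
```

The patch itself only sets the device and records on that device's stream right away; restoring the previous device afterwards is an extra nicety in the sketch, not something the commit does.
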