From e8d802159e7d8a9c4b89d0c4f7b9e8c4fa0d31d4 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Tue, 24 Apr 2018 11:13:39 +0800
Subject: [PATCH 01/50] add lookup_sparse_table_op

---
 paddle/fluid/framework/lod_tensor_test.cc     |   4 +-
 paddle/fluid/framework/selected_rows.cc       |  10 +-
 paddle/fluid/framework/selected_rows.h        |   8 +-
 paddle/fluid/framework/selected_rows_test.cc  |   8 +-
 paddle/fluid/operators/detail/serde_test.cc   |   2 +-
 .../fluid/operators/lookup_sparse_table_op.cc | 154 ++++++++++++++++++
 .../unittests/test_lookup_sparse_table_op.py  |  86 ++++++++++
 7 files changed, 257 insertions(+), 15 deletions(-)
 create mode 100644 paddle/fluid/operators/lookup_sparse_table_op.cc
 create mode 100644 python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py

diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc
index 97ab98f09b..77e5ec4c7d 100644
--- a/paddle/fluid/framework/lod_tensor_test.cc
+++ b/paddle/fluid/framework/lod_tensor_test.cc
@@ -255,11 +255,11 @@ TEST(LoDTensor, RecordIO) {
     std::unique_ptr<std::istream> stream_ptr(stream);
     recordio::Scanner scanner(std::move(stream_ptr));
     auto tensors = ReadFromRecordIO(&scanner, ctx);
-    ASSERT_EQ(tensors.size(), 2);
+    ASSERT_EQ(tensors.size(), static_cast<size_t>(2));
     assert_tensor_ok(tensors[0]);
     assert_tensor_ok(tensors[1]);
     tensors = ReadFromRecordIO(&scanner, ctx);
-    ASSERT_EQ(tensors.size(), 2);
+    ASSERT_EQ(tensors.size(), static_cast<size_t>(2));
     assert_tensor_ok(tensors[0]);
     assert_tensor_ok(tensors[1]);
   }
diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc
index 794e7f7434..56cf6693ca 100644
--- a/paddle/fluid/framework/selected_rows.cc
+++ b/paddle/fluid/framework/selected_rows.cc
@@ -120,11 +120,11 @@ bool SelectedRows::HasKey(int64_t key) const {
              : true;
 }
 
-std::vector<int64_t> SelectedRows::Get(std::vector<int64_t> keys,
-                                       framework::Tensor* value) const {
+std::vector<std::pair<int64_t, int64_t>> SelectedRows::Get(
+    std::vector<int64_t> keys, framework::Tensor* value) const {
   PADDLE_ENFORCE(value->IsInitialized(),
                  "The value tensor should be initialized.");
-  std::vector<int64_t> non_keys;
+  std::vector<std::pair<int64_t, int64_t>> non_keys_pair;
   int64_t value_width = value_->numel() / value_->dims()[0];
   PADDLE_ENFORCE_EQ(value_width, value->numel() / value->dims()[0],
                     "output tensor should have the same shape with table "
@@ -133,7 +133,7 @@ std::vector<int64_t> SelectedRows::Get(std::vector<int64_t> keys,
   for (size_t i = 0; i < keys.size(); ++i) {
     int64_t index = Index(keys[i]);
     if (index == -1) {
-      non_keys.push_back(keys[i]);
+      non_keys_pair.push_back(std::make_pair(keys[i], static_cast<int64_t>(i)));
     } else {
       framework::VisitDataType(
           framework::ToDataType(value_->type()),
@@ -141,7 +141,7 @@ std::vector<int64_t> SelectedRows::Get(std::vector<int64_t> keys,
                             index * value_width, value_width));
     }
   }
-  return non_keys;
+  return non_keys_pair;
 }
 
 bool SelectedRows::Set(int64_t key, const framework::Tensor& value) {
diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h
index d6c9507b16..c27c927ee7 100644
--- a/paddle/fluid/framework/selected_rows.h
+++ b/paddle/fluid/framework/selected_rows.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 #include <vector>
+#include <utility>
 #include "paddle/fluid/framework/lod_tensor.h"
@@ -78,10 +79,11 @@ class SelectedRows {
   /*
    * @brief Get value by the key list.
    *
-   * @return a list of keys which does not exists in table
+   * @return a list of pairs, each holding a key missing from the table and
+   * its row index in the value tensor
    */
-  std::vector<int64_t> Get(std::vector<int64_t> keys,
-                           framework::Tensor* tensor) const;
+  std::vector<std::pair<int64_t, int64_t>> Get(std::vector<int64_t> keys,
+                                               framework::Tensor* value) const;
 
   /*
    * @brief Set a key-value pair into the table.
diff --git a/paddle/fluid/framework/selected_rows_test.cc b/paddle/fluid/framework/selected_rows_test.cc
index 39fe6d9294..eefcaa5672 100644
--- a/paddle/fluid/framework/selected_rows_test.cc
+++ b/paddle/fluid/framework/selected_rows_test.cc
@@ -59,7 +59,7 @@ TEST_F(SelectedRowsTester, SerializeAndDeseralize) {
   ASSERT_EQ(selected_rows_->GetCompleteDims(), dst_tensor.GetCompleteDims());
 }
 
-TEST_F(SelectedRowsTester, Table) {
+TEST_F(SelectedRowsTester, SparseTable) {
   platform::CPUPlace cpu;
   SelectedRows table;
   // initialize a sparse table
@@ -87,11 +87,11 @@
   framework::Tensor get_value;
   get_value.mutable_data<float>(framework::make_ddim({2, 100}), cpu);
   std::vector<int64_t> keys({non_key, key});
-  auto non_keys = table.Get(keys, &get_value);
+  auto non_key_pairs = table.Get(keys, &get_value);
 
   ASSERT_EQ(get_value.data<float>()[100], static_cast<float>(10));
-  ASSERT_EQ(non_keys.size(), static_cast<size_t>(1));
-  ASSERT_EQ(non_keys[0], non_key);
+  ASSERT_EQ(non_key_pairs.size(), static_cast<size_t>(1));
+  ASSERT_EQ(non_key_pairs[0].first, non_key);
 }
 
 }  // namespace framework
diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/detail/serde_test.cc
index 221d2f4c5b..e9eaaf1cbc 100644
--- a/paddle/fluid/operators/detail/serde_test.cc
+++ b/paddle/fluid/operators/detail/serde_test.cc
@@ -108,7 +108,7 @@ void RunSerdeTestSelectedRows(platform::Place place) {
     EXPECT_FLOAT_EQ(tensor_data2[i], 32.7);
   }
   for (size_t i = 0; i < rows2->size(); ++i) {
-    EXPECT_EQ(rows_data2[i], i);
+    EXPECT_EQ(rows_data2[i], static_cast<int64_t>(i));
   }
   EXPECT_EQ(slr2->height(), 1000);
 }
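Taken together, the SelectedRows::Get hunks above change the method's contract: it used to return only the keys missing from the table, while the new version returns (key, row-index) pairs so the caller also knows which rows of the output tensor are still unfilled. A small Python model of the new contract; the dict-backed table and the function name are illustrative only, not Paddle API:

import numpy as np

def selected_rows_get(table, keys, value):
    """Model of the new SelectedRows::Get: copy hit rows into `value` and
    return (missing_key, row_index) pairs for the misses."""
    non_keys_pair = []
    for i, key in enumerate(keys):
        if key in table:
            value[i] = table[key]           # hit: copy the stored row
        else:
            non_keys_pair.append((key, i))  # miss: remember output slot i
    return non_keys_pair

table = {7: np.ones(4)}
out = np.zeros((2, 4))
misses = selected_rows_get(table, [7, 9], out)
assert misses == [(9, 1)]  # key 9 is missing; its row in `out` is index 1

The index is exactly what the op below needs: after initializing a fresh row for a missing key, it can write that row into the right slot of the output tensor.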
diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc
new file mode 100644
index 0000000000..249896993b
--- /dev/null
+++ b/paddle/fluid/operators/lookup_sparse_table_op.cc
@@ -0,0 +1,154 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <random>
+
+#include "paddle/fluid/framework/data_type.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/math/math_function.h"
+#include "paddle/fluid/platform/device_context.h"
+
+namespace paddle {
+namespace operators {
+
+constexpr int64_t kNoPadding = -1;
+
+class LookupSparseTableInferShape : public framework::InferShapeBase {
+ public:
+  void operator()(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of LookupSparseTableOp should not be null.");
+    auto shape_w = ctx->GetInputDim("W");
+    auto shape_ids = ctx->GetInputDim("Ids");
+    shape_w[0] = shape_ids.size();
+    ctx->SetOutputDim("Out", shape_w);
+  }
+};
+
+class LookupSparseTableOp : public framework::OperatorBase {
+ public:
+  using framework::OperatorBase::OperatorBase;
+
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &dev_place) const override {
+    auto out_var = scope.FindVar(Output("Out"));
+    auto w_var = scope.FindVar(Input("W"));
+    auto ids_var = scope.FindVar(Input("Ids"));
+    unsigned int seed = static_cast<unsigned int>(Attr<int>("seed"));
+    float min = Attr<float>("min");
+    float max = Attr<float>("max");
+
+    PADDLE_ENFORCE(out_var->IsType<framework::LoDTensor>(),
+                   "The type of Out var should be LodTensor.");
+    PADDLE_ENFORCE(w_var->IsType<framework::SelectedRows>(),
+                   "The type of W var should be SelectedRows.");
+    PADDLE_ENFORCE(ids_var->IsType<framework::SelectedRows>(),
+                   "The type of Ids var should be SelectedRows.");
+    auto &ids_t = ids_var->Get<framework::SelectedRows>();
+    auto out_t = out_var->GetMutable<framework::LoDTensor>();
+    auto w_t = w_var->GetMutable<framework::SelectedRows>();
+    auto keys = ids_t.rows();
+
+    // TODO(Yancey1989): support CUDA Place for the sparse table
+    platform::CPUPlace cpu;
+    auto out_shape = w_t->value().dims();
+    out_shape[0] = keys.size();
+    out_t->Resize(out_shape);
+    out_t->mutable_data(cpu, w_t->value().type());
+
+    PADDLE_ENFORCE_EQ(framework::ToDataType(w_t->value().type()),
+                      framework::proto::VarType::FP32,
+                      "The sparse table only support FP32");
+
+    auto non_keys_pair = w_t->Get(keys, out_t);
+    auto value_shape = w_t->value().dims();
+    value_shape[0] = 1;
+    for (const auto &it : non_keys_pair) {
+      const auto key = it.first;
+      const auto index = it.second;
+      framework::Tensor value;
+      value.Resize(value_shape);
+      auto data = value.mutable_data<float>(cpu);
+
+      std::minstd_rand engine;
+      engine.seed(seed);
+      std::uniform_real_distribution<float> dist(min, max);
+      int64_t size = value.numel();
+      for (int64_t i = 0; i < size; ++i) {
+        data[i] = dist(engine);
+      }
+      w_t->Set(key, value);
+      memory::Copy(cpu,
+                   out_t->mutable_data<float>(cpu) + index * value.numel(),
+                   cpu, value.data<float>(), value.numel() * sizeof(float));
+    }
+  }
+};
+
+class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  LookupSparseTableOpMaker(OpProto *proto, OpAttrChecker *op_checker)
+      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("W",
+             "(SelectedRows) The input represents embedding table, "
+             "which is a learnable parameter.");
+    AddInput("Ids",
+             "(SelectedRows) Ids's type should be SelectedRows "
+             "the rows of Ids contains the Ids to be looked up in W.");
+    AddOutput("Out",
+              "(SelectedRows) The lookup results, which have the "
+              "same type as W.");
+    AddAttr<int64_t>("padding_idx",
+                     "(int64, default -1) "
+                     "If the value is -1, it makes no effect to lookup. "
+                     "Otherwise the given value indicates padding the output "
+                     "with zeros whenever lookup encounters it in Ids.")
+        .SetDefault(kNoPadding);
+    AddAttr<float>("min",
+                   "(float, default -1.0) "
+                   "Minimum value of uniform random")
+        .SetDefault(-1.0f);
+    AddAttr<float>("max",
+                   "(float, default 1.0) "
+                   "Maximum value of uniform random")
+        .SetDefault(1.0f);
+    AddAttr<int>("seed",
+                 "(int, default 0) "
+                 "Random seed used for generating samples. "
+                 "0 means use a seed generated by the system. "
+                 "Note that if seed is not 0, this operator will always "
+                 "generate the same random numbers every time.")
+        .SetDefault(0);
+    AddComment(R"DOC(
+Lookup Sparse Table Operator.
+
+This operator performs a lookup on the parameter W,
+then concatenates the results into a sparse tensor.
+
+The type of Ids (Input) is SelectedRows; the rows of Ids contain
+the ids to be looked up in W.
+If an id is not in the sparse table, this operator returns a
+random value and sets that value into the table for the next
+lookup.
+
+)DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(lookup_sparse_table, ops::LookupSparseTableOp,
+                  ops::LookupSparseTableInferShape,
+                  ops::LookupSparseTableOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
" + "Otherwise the given value indicates padding the output " + "with zeros whenever lookup encounters it in Ids.") + .SetDefault(kNoPadding); + AddAttr("min", + "(float, default -1.0) " + "Minimum value of uniform random") + .SetDefault(-1.0f); + AddAttr("max", + "(float, default 1.0) " + "Maximun value of uniform random") + .SetDefault(1.0f); + AddAttr("seed", + "(int, default 0) " + "Random seed used for generating samples. " + "0 means use a seed generated by the system." + "Note that if seed is not 0, this operator will always " + "generate the same random numbers every time.") + .SetDefault(0); + AddComment(R"DOC( +Lookup Sprase Tablel Operator. + +This operator is used to perform lookup on parameter W, +then concatenated into a sparse tensor. + +The type of Ids(Input) is SelectedRows, the rows of Ids contains +the ids to be looked up in W; +if the Id is not in the sparse table, this operator will return a +random value and set the value into the table for the next looking up. + +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(lookup_sparse_table, ops::LookupSparseTableOp, + ops::LookupSparseTableInferShape, + ops::LookupSparseTableOpMaker, + paddle::framework::EmptyGradOpMaker); diff --git a/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py new file mode 100644 index 0000000000..6c339cba83 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py @@ -0,0 +1,86 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
From 8aea5cac0a007ec5d6bd1746433230cb123fef9d Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Wed, 25 Apr 2018 15:29:50 +0800
Subject: [PATCH 02/50] add attr auto_grown_table

---
 paddle/fluid/operators/lookup_sparse_table_op.cc | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc
index 249896993b..88fa59c5fb 100644
--- a/paddle/fluid/operators/lookup_sparse_table_op.cc
+++ b/paddle/fluid/operators/lookup_sparse_table_op.cc
@@ -49,6 +49,7 @@ class LookupSparseTableOp : public framework::OperatorBase {
     unsigned int seed = static_cast<unsigned int>(Attr<int>("seed"));
     float min = Attr<float>("min");
     float max = Attr<float>("max");
+    bool auto_grown_table = Attr<bool>("auto_grown_table");
 
     PADDLE_ENFORCE(out_var->IsType<framework::LoDTensor>(),
                    "The type of Out var should be LodTensor.");
     PADDLE_ENFORCE(w_var->IsType<framework::SelectedRows>(),
                    "The type of W var should be SelectedRows.");
     PADDLE_ENFORCE(ids_var->IsType<framework::SelectedRows>(),
                    "The type of Ids var should be SelectedRows.");
@@ -71,8 +72,11 @@ class LookupSparseTableOp : public framework::OperatorBase {
     PADDLE_ENFORCE_EQ(framework::ToDataType(w_t->value().type()),
                       framework::proto::VarType::FP32,
                       "The sparse table only support FP32");
-
     auto non_keys_pair = w_t->Get(keys, out_t);
+    if (!auto_grown_table) {
+      PADDLE_ENFORCE_EQ(non_keys_pair.size(), static_cast<size_t>(0),
+                        "some keys do not exist in the sparse table.");
+    }
     auto value_shape = w_t->value().dims();
     value_shape[0] = 1;
     for (const auto &it : non_keys_pair) {
@@ -130,6 +134,10 @@ class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker {
                  "Note that if seed is not 0, this operator will always "
                  "generate the same random numbers every time.")
         .SetDefault(0);
+    AddAttr<bool>("auto_grown_table",
+                  "(bool, default true) "
+                  "Whether to create a new value for a nonexistent key.")
+        .SetDefault(true);
     AddComment(R"DOC(
 Lookup Sparse Table Operator.
 
 This operator performs a lookup on the parameter W,
 then concatenates the results into a sparse tensor.
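Patch 02 gates the miss path behind auto_grown_table: with the flag off, a missing key becomes a hard error instead of an auto-initialized row. Extending the sketch after patch 01 (same illustrative names, not Paddle API):

def lookup_sparse_table_checked(table, ids, width, auto_grown_table=True):
    """With auto_grown_table=False, any missing key raises instead of
    growing the table."""
    missing = [idx for idx in ids if idx not in table]
    if not auto_grown_table and missing:
        raise KeyError("keys do not exist in the sparse table: %s" % missing)
    return lookup_sparse_table(table, ids, width)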
From 82571deb890cc013831b51a996350e65ae76a3df Mon Sep 17 00:00:00 2001
From: yangyaming
Date: Thu, 26 Apr 2018 10:25:55 +0800
Subject: [PATCH 03/50] Change `customize_loss_grad` to
 `use_default_grad_scale`.

---
 paddle/fluid/framework/parallel_executor.cc |  6 +++---
 paddle/fluid/framework/parallel_executor.h  |  2 +-
 paddle/fluid/pybind/pybind.cc               | 10 +++++-----
 python/paddle/fluid/parallel_executor.py    |  8 ++++++--
 4 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index de644e8519..4712efeff6 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -58,7 +58,7 @@ ParallelExecutor::ParallelExecutor(
     const std::unordered_set<std::string> &bcast_vars,
     const ProgramDesc &main_program, const std::string &loss_var_name,
     Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay,
-    bool customize_scale_loss)
+    bool use_default_grad_scale)
     : member_(new ParallelExecutorPrivate(places)) {
   member_->global_scope_ = scope;
 
@@ -93,11 +93,11 @@ ParallelExecutor::ParallelExecutor(
 #ifdef PADDLE_WITH_CUDA
   details::MultiDevSSAGraphBuilder builder(
       member_->places_, loss_var_name, params, member_->local_scopes_,
-      customize_scale_loss, member_->nccl_ctxs_.get());
+      use_default_grad_scale, member_->nccl_ctxs_.get());
 #else
   details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                            params, member_->local_scopes_,
-                                           customize_scale_loss);
+                                           use_default_grad_scale);
 #endif
   auto graph = builder.Build(main_program);
 
diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h
index 49da123d98..ecd107d81f 100644
--- a/paddle/fluid/framework/parallel_executor.h
+++ b/paddle/fluid/framework/parallel_executor.h
@@ -40,7 +40,7 @@ class ParallelExecutor {
                    const ProgramDesc& main_program,
                    const std::string& loss_var_name, Scope* scope,
                    const std::vector<Scope*>& local_scopes,
-                   bool allow_op_delay, bool customize_scale_loss);
+                   bool allow_op_delay, bool use_default_grad_scale);
 
   ~ParallelExecutor();
 
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index b20b514fcd..c925686f83 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -502,11 +502,11 @@ All parameter, weight, gradient are variables in Paddle.
              const std::unordered_set<std::string> &bcast_vars,
              const ProgramDesc &main_program, const std::string &loss_var_name,
             Scope *scope, std::vector<Scope *> &local_scopes,
-             bool allow_op_delay, bool customize_loss_grad) {
-            new (&self) ParallelExecutor(num_threads, use_event, places,
-                                         params, bcast_vars, main_program,
-                                         loss_var_name, scope, local_scopes,
-                                         allow_op_delay, customize_loss_grad);
+             bool allow_op_delay, bool use_default_grad_scale) {
+            new (&self) ParallelExecutor(
+                num_threads, use_event, places, params, bcast_vars,
+                main_program, loss_var_name, scope, local_scopes,
+                allow_op_delay, use_default_grad_scale);
           })
      .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
      // NOTE: even we return a vec<Scope*>* to Python use reference policy.
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 4adbb2ea99..d57341cfa9 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -30,7 +30,7 @@ class ParallelExecutor(object):
                  num_threads=None,
                  allow_op_delay=False,
                  share_vars_from=None,
-                 customize_loss_grad=False):
+                 use_default_grad_scale=True):
         """
         ParallelExecutor can run program in parallel.
 
@@ -46,6 +46,10 @@
                 improve performance in some cases, default False.
             share_vars_from(ParallelExecutor, default None): If provided,
                 it will share variables from the specified ParallelExecutor.
+            use_default_grad_scale(bool, default True): If set True, a default
+                scale value equal to `1./device_count` would be multiplied to
+                the gradients. Otherwise, a customized scale value should be
+                feeded to the network.
 
         Returns:
             A ParallelExecutor object.
@@ -124,7 +128,7 @@
             scope,
             local_scopes,
             allow_op_delay,
-            customize_loss_grad)
+            use_default_grad_scale)
         self.scope = scope
 
     def run(self, fetch_list, feed=None, feed_dict=None):
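The rename in patch 03 also flips the flag's polarity: use_default_grad_scale=True keeps the built-in 1/device_count scaling of the loss gradient, while False means the caller supplies its own scale. Below is a sketch against the fluid API of this era; the construction follows the docstring above, but the exact way a custom loss gradient is fed in (a variable named loss.name + "@GRAD") is an assumption here, not something the patch confirms.

import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())

# Default mode: each device's loss gradient starts at 1./device_count.
pe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
pe.run(fetch_list=[loss.name],
       feed={'x': np.random.rand(8, 4).astype('float32')})

# Custom mode: disable the built-in scale and feed the loss gradient
# yourself (feed key below is assumed, not confirmed by the patch).
pe_custom = fluid.ParallelExecutor(
    use_cuda=True, loss_name=loss.name,
    use_default_grad_scale=False, share_vars_from=pe)
# feed={..., loss.name + "@GRAD": my_scale_array}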
From dccd013bd34c9bc63b67fbe0f3af5c72c3758a82 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Thu, 26 Apr 2018 11:35:22 +0800
Subject: [PATCH 04/50] refine distribute transpiler

---
 .../fluid/operators/lookup_sparse_table_op.cc | 11 +++++----
 paddle/fluid/operators/sgd_op.cc              | 21 +++++++++++++++-
 paddle/fluid/operators/uniform_random_op.cc   | 24 +++++++++++++++++--
 python/paddle/fluid/distribute_transpiler.py  | 16 +++++++++----
 4 files changed, 61 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc
index 88fa59c5fb..ff3734b8f0 100644
--- a/paddle/fluid/operators/lookup_sparse_table_op.cc
+++ b/paddle/fluid/operators/lookup_sparse_table_op.cc
@@ -55,12 +55,16 @@ class LookupSparseTableOp : public framework::OperatorBase {
                    "The type of Out var should be LodTensor.");
     PADDLE_ENFORCE(w_var->IsType<framework::SelectedRows>(),
                    "The type of W var should be SelectedRows.");
-    PADDLE_ENFORCE(ids_var->IsType<framework::SelectedRows>(),
+    PADDLE_ENFORCE(ids_var->IsType<framework::LoDTensor>(),
                    "The type of Ids var should be SelectedRows.");
-    auto &ids_t = ids_var->Get<framework::SelectedRows>();
+    auto &ids_t = ids_var->Get<framework::LoDTensor>();
     auto out_t = out_var->GetMutable<framework::LoDTensor>();
     auto w_t = w_var->GetMutable<framework::SelectedRows>();
-    auto keys = ids_t.rows();
+    std::vector<int64_t> keys;
+    keys.resize(ids_t.numel());
+    for (size_t i = 0; i < ids_t.numel(); ++i) {
+      keys[i] = ids_t.data<int64_t>()[i];
+    }
 
     // TODO(Yancey1989): support CUDA Place for the sparse table
     platform::CPUPlace cpu;
     auto out_shape = w_t->value().dims();
     out_shape[0] = keys.size();
     out_t->Resize(out_shape);
     out_t->mutable_data(cpu, w_t->value().type());
-
     PADDLE_ENFORCE_EQ(framework::ToDataType(w_t->value().type()),
                       framework::proto::VarType::FP32,
                       "The sparse table only support FP32");
diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc
index 06cb0550ad..bd04c60ffa 100644
--- a/paddle/fluid/operators/sgd_op.cc
+++ b/paddle/fluid/operators/sgd_op.cc
@@ -48,6 +48,24 @@ class SGDOp : public framework::OperatorWithKernel {
   }
 };
 
+class SGDOpInferVarType : public framework::VarTypeInference {
+ public:
+  void operator()(const framework::OpDesc& op_desc,
+                  framework::BlockDesc* block) const override {
+    auto input_var = op_desc.Input("Param")[0];
+    for (auto& out_var : op_desc.Output("ParamOut")) {
+      if (block->FindRecursiveOrCreateVar(input_var).GetType() ==
+          framework::proto::VarType::SELECTED_ROWS) {
+        block->FindRecursiveOrCreateVar(out_var).SetType(
+            framework::proto::VarType::SELECTED_ROWS);
+      } else {
+        block->FindRecursiveOrCreateVar(out_var).SetType(
+            framework::proto::VarType::LOD_TENSOR);
+      }
+    }
+  }
+};
+
 class SGDOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker)
@@ -74,5 +92,6 @@ $$param\_out = param - learning\_rate * grad$$
 }  // namespace paddle
 
 namespace ops = paddle::operators;
-REGISTER_OP_WITHOUT_GRADIENT(sgd, ops::SGDOp, ops::SGDOpMaker);
+REGISTER_OPERATOR(sgd, ops::SGDOp, ops::SGDOpMaker,
+                  paddle::framework::EmptyGradOpMaker, ops::SGDOpInferVarType);
 REGISTER_OP_CPU_KERNEL(sgd, ops::SGDOpKernel<float>, ops::SGDOpKernel<double>);
diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc
index acaefaacda..3b5cf68dd4 100644
--- a/paddle/fluid/operators/uniform_random_op.cc
+++ b/paddle/fluid/operators/uniform_random_op.cc
@@ -116,11 +116,31 @@ uniform distribution.
         .SetDefault(framework::proto::VarType::FP32);
   }
 };
+
+class UniformRandomOpVarTypeInference : public framework::VarTypeInference {
+ public:
+  void operator()(const framework::OpDesc& op_desc,
+                  framework::BlockDesc* block) const override {
+    auto out_var_name = op_desc.Output("Out").front();
+    if (block->FindRecursiveOrCreateVar(out_var_name).GetType() ==
+        framework::proto::VarType::SELECTED_ROWS) {
+      block->FindRecursiveOrCreateVar(out_var_name)
+          .SetType(framework::proto::VarType::SELECTED_ROWS);
+    } else {
+      block->FindRecursiveOrCreateVar(out_var_name)
+          .SetType(framework::proto::VarType::LOD_TENSOR);
+    }
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
-REGISTER_OP_WITHOUT_GRADIENT(uniform_random, paddle::operators::UniformRandomOp,
-                             paddle::operators::UniformRandomOpMaker);
+REGISTER_OPERATOR(uniform_random, paddle::operators::UniformRandomOp,
+                  paddle::operators::UniformRandomOpMaker,
+                  paddle::framework::EmptyGradOpMaker,
+                  paddle::operators::UniformRandomOpVarTypeInference);
+
 REGISTER_OP_CPU_KERNEL(uniform_random,
                        paddle::operators::CPUUniformRandomKernel<float>,
                        paddle::operators::CPUUniformRandomKernel<double>);
diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py
index d07e0f696e..3e437ef799 100644
--- a/python/paddle/fluid/distribute_transpiler.py
+++ b/python/paddle/fluid/distribute_transpiler.py
@@ -650,7 +650,7 @@ class DistributeTranspiler:
                 shape=trainer_out.shape,
                 dtype=trainer_out.dtype)
             prefetch_block.append_op(
-                type=LOOKUP_TABLE_TYPE,
+                type="lookup_sparse_table",
                 inputs={'Ids': pserver_ids,
                         "W": table_var},
                 outputs={"Out": pserver_out},
@@ -674,9 +674,17 @@ class DistributeTranspiler:
 
         # STEP: create table optimize block
         # create table param and grad var in pserver program
-        param_var = _clone_var(
-            pserver_program.global_block(),
-            self.origin_program.global_block().vars[self.table_name])
+        #param_var = _clone_var(
+        #    pserver_program.global_block(),
+        #    self.origin_program.global_block().vars[self.table_name])
+        origin_param_var = self.origin_program.global_block().vars[
+            self.table_name]
+        param_var = pserver_program.global_block().create_var(
+            name=origin_param_var.name,
+            shape=origin_param_var.shape,
+            dtype=origin_param_var.dtype,
+            type=core.VarDesc.VarType.SELECTED_ROWS,
+            persistable=True)
         grad_var = _clone_var(
             pserver_program.global_block(),
            self.origin_program.global_block().vars[framework.grad_var_name(

From fb1167c31524509d3186ae56cc908ed4188bf4f2 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Thu, 26 Apr 2018 11:37:13
+0800 Subject: [PATCH 05/50] delete unused comment --- python/paddle/fluid/distribute_transpiler.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 3e437ef799..779edff7c4 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -674,9 +674,6 @@ class DistributeTranspiler: # STEP: create table optimize block # create table param and grad var in pserver program - #param_var = _clone_var( - # pserver_program.global_block(), - # self.origin_program.global_block().vars[self.table_name]) origin_param_var = self.origin_program.global_block().vars[ self.table_name] param_var = pserver_program.global_block().create_var( From bf824d854adff4bbf2a527a1221af327916d7efd Mon Sep 17 00:00:00 2001 From: yangyaming Date: Thu, 26 Apr 2018 11:55:28 +0800 Subject: [PATCH 06/50] Refine doc. --- python/paddle/fluid/parallel_executor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index d57341cfa9..f4128dcbe9 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -48,8 +48,9 @@ class ParallelExecutor(object): it will share variables from the specified ParallelExecutor. use_default_grad_scale(bool, default True): If set True, a default scale value equal to `1./device_count` would be multiplied to - the gradients. Otherwise, a customized scale value should be - feeded to the network. + gradients of each device and scaled gradients would be + aggregated. Otherwise, a customized scale value should be fed + to the network. Returns: A ParallelExecutor object. From 0d491b670ae380814030d661603cf7d5b547b4a5 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 26 Apr 2018 15:02:49 +0800 Subject: [PATCH 07/50] use-multi-thread-todo-update --- paddle/fluid/operators/listen_and_serv_op.cc | 68 +++++++++++--------- paddle/fluid/operators/listen_and_serv_op.h | 4 +- 2 files changed, 40 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index ba2ea0d13e..616a89a413 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -45,20 +45,6 @@ static void split(const std::string &str, char sep, } } -static void AsyncExecuteBlock(framework::Executor *executor, - framework::ExecutorPrepareContext *prepared, - framework::Scope *scope) { - std::future future = framework::Async([&executor, &prepared, &scope]() { - try { - executor->RunPreparedContext(prepared, scope, false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - }); - // TODO(qiao) maybe we can remove this - future.wait(); -} - static void ParallelExecuteBlocks( const std::vector ¶llel_blkids, framework::Executor *executor, const std::vector> @@ -201,14 +187,35 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, } // while(true) } +static void AsyncUpdateThread( + const bool &exit_flag, const std::shared_ptr &queue, + framework::Executor *executor, + framework::ExecutorPrepareContext *prepared) { + while (!exit_flag) { + const detail::ReceivedMessage v = queue->Pop(); + auto recv_var_name = v.first; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side 
var"); + } + try { + executor->RunPreparedContext(prepared, v.second->GetMutableLocalScope(), + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + } +} + void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, - framework::ProgramDesc *program, - framework::Scope *recv_scope, - framework::BlockDesc *prefetch_block) const { + framework::ProgramDesc *program) const { VLOG(3) << "RunAsyncLoop in"; // grad name to block id std::unordered_map grad_to_block_id; std::unordered_map id_to_grad; + std::unordered_map> + grad_to_queue; auto grad_to_block_id_str = Attr>("grad_to_block_id"); @@ -220,6 +227,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); int block_id = std::stoi(pieces[1]); grad_to_block_id[pieces[0]] = block_id; + grad_to_queue[pieces[0]] = std::make_shared(); id_to_grad[block_id] = pieces[0]; } size_t num_blocks = program->Size(); @@ -240,6 +248,18 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, VLOG(3) << "RunAsyncLoop into while"; bool exit_flag = false; + + VLOG(3) << "start async optimize threads"; + std::vector> fs; + for (auto iter = grad_to_queue.begin(); iter != grad_to_queue.end(); iter++) { + std::string grad_name = iter->first; + fs.push_back(framework::Async([grad_name, &exit_flag, &executor, + &grad_to_queue, &grad_to_prepared_block]() { + AsyncUpdateThread(exit_flag, grad_to_queue[grad_name], executor, + grad_to_prepared_block[grad_name].get()); + })); + } + while (!exit_flag) { const detail::ReceivedMessage v = rpc_service_->Get(); auto recv_var_name = v.first; @@ -249,17 +269,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, break; } else { VLOG(3) << "received grad: " << recv_var_name; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - AsyncExecuteBlock(executor, grad_to_prepared_block[recv_var_name].get(), - v.second->GetMutableLocalScope()); - // TODO(qiao): explain why - if (var->IsType()) { - var->GetMutable()->mutable_rows()->clear(); - } + grad_to_queue[recv_var_name]->Push(v); } if (exit_flag) { @@ -308,7 +318,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, if (sync_mode) { RunSyncLoop(&executor, program, &recv_scope, prefetch_block); } else { - RunAsyncLoop(&executor, program, &recv_scope, prefetch_block); + RunAsyncLoop(&executor, program); } } diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 3cc0f30477..5c8fc31c97 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -47,9 +47,7 @@ class ListenAndServOp : public framework::OperatorBase { framework::BlockDesc* prefetch_block) const; void RunAsyncLoop(framework::Executor* executor, - framework::ProgramDesc* program, - framework::Scope* recv_scope, - framework::BlockDesc* prefetch_block) const; + framework::ProgramDesc* program) const; void Stop() override; From 08fbab9429528a5c25d9fa75b311f90da98b916f Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 26 Apr 2018 19:27:53 +0800 Subject: [PATCH 08/50] complete ListenAndServ layer in layers/io.py --- python/paddle/fluid/layers/io.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index cc71c2136a..e8eff33bd1 100644 --- 
a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -168,7 +168,9 @@ class ListenAndServ(object):
                 'endpoint': self.endpoint,
                 'Fanin': self.fan_in,
                 'OptimizeBlock': current_block,
-                'PrefetchBlock': empty_block
+                'PrefetchBlock': empty_block,
+                'sync_mode': True,  # async is not supported in layers yet
+                'grad_to_block_id': []
             })

From 98a2179f80f587ab50d12f0bb41b8110306906ba Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Fri, 27 Apr 2018 13:45:01 +0800
Subject: [PATCH 09/50] fix ListenAndServ op

---
 python/paddle/fluid/layers/io.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index e8eff33bd1..acfad45704 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -170,7 +170,7 @@ class ListenAndServ(object):
                 'OptimizeBlock': current_block,
                 'PrefetchBlock': empty_block,
                 'sync_mode': True,  # async is not supported in layers yet
-                'grad_to_block_id': []
+                'grad_to_block_id': [""]
             })

From 1a93253f1665e7d85b11048c04df44c4753a46b3 Mon Sep 17 00:00:00 2001
From: Yancey1989
Date: Fri, 27 Apr 2018 13:54:38 +0800
Subject: [PATCH 10/50] fix unittest

---
 paddle/fluid/operators/lookup_sparse_table_op.cc         | 8 ++++----
 .../fluid/tests/unittests/test_lookup_sparse_table_op.py | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc
index ff3734b8f0..f1839e456d 100644
--- a/paddle/fluid/operators/lookup_sparse_table_op.cc
+++ b/paddle/fluid/operators/lookup_sparse_table_op.cc
@@ -56,7 +56,7 @@ class LookupSparseTableOp : public framework::OperatorBase {
     PADDLE_ENFORCE(w_var->IsType<framework::SelectedRows>(),
                    "The type of W var should be SelectedRows.");
     PADDLE_ENFORCE(ids_var->IsType<framework::LoDTensor>(),
-                   "The type of Ids var should be SelectedRows.");
+                   "The type of Ids var should be LoDTensor.");
     auto &ids_t = ids_var->Get<framework::LoDTensor>();
     auto out_t = out_var->GetMutable<framework::LoDTensor>();
     auto w_t = w_var->GetMutable<framework::SelectedRows>();
@@ -111,10 +111,10 @@ class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker {
              "(SelectedRows) The input represents embedding table, "
             "which is a learnable parameter.");
     AddInput("Ids",
-             "(SelectedRows) Ids's type should be SelectedRows "
-             "the rows of Ids contains the Ids to be looked up in W.");
+             "(LoDTensor) Ids's type should be LoDTensor. "
+             "The ids to be looked up in W.");
     AddOutput("Out",
-              "(SelectedRows) The lookup results, which have the "
+              "(LoDTensor) The lookup results, which have the "
               "same type as W.");
     AddAttr<int64_t>("padding_idx",
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py
index 6c339cba83..aa9eae1e88 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_sparse_table_op.py
@@ -32,9 +32,9 @@ class TestLookupSpraseTable(OpTest):
         scope = core.Scope()
 
         # create and initialize Id Variable
-        ids = scope.var("Ids").get_selected_rows()
-        ids_array = [0, 2, 3, 5, 100]
-        ids.set_rows(ids_array)
+        ids = scope.var("Ids").get_tensor()
+        ids_array = np.array([0, 2, 3, 5, 100]).astype("int64")
+        ids.set(ids_array, place)
 
         # create and initialize W Variable
         rows = [0, 1, 2, 3, 4, 5, 6]

From f82cb635cf0c8554d6e0ca0502e76c4f89ea23f5 Mon Sep 17 00:00:00 2001
From: qiaolongfei
Date: Fri, 27 Apr 2018 17:57:20 +0800
Subject: [PATCH 11/50] optimize code, add more log

---
paddle/fluid/operators/detail/grpc_server.cc | 2 +- paddle/fluid/operators/listen_and_serv_op.cc | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 95f4738b4f..25c6bb9ee2 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -336,9 +336,9 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, switch (base->Status()) { case PROCESS: { - VLOG(4) << cq_name << " PROCESS status:" << base->Status(); TryToRegisterNewOne(); base->Process(); + VLOG(4) << cq_name << " PROCESS status:" << base->Status(); break; } case FINISH: { diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 88b07d865f..36bd1d3993 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -188,9 +188,11 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, } static void AsyncUpdateThread( - const bool &exit_flag, const std::shared_ptr &queue, + const std::string &var_name, const bool &exit_flag, + const std::shared_ptr &queue, framework::Executor *executor, framework::ExecutorPrepareContext *prepared) { + VLOG(3) << "update thread for " << var_name << " started"; while (!exit_flag) { const detail::ReceivedMessage v = queue->Pop(); auto recv_var_name = v.first; @@ -206,6 +208,7 @@ static void AsyncUpdateThread( LOG(ERROR) << "run sub program error " << e.what(); } } + VLOG(3) << "update thread for " << var_name << " ended"; } void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, @@ -246,7 +249,6 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; } - VLOG(3) << "RunAsyncLoop into while"; bool exit_flag = false; VLOG(3) << "start async optimize threads"; @@ -255,11 +257,12 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, std::string grad_name = iter->first; fs.push_back(framework::Async([grad_name, &exit_flag, &executor, &grad_to_queue, &grad_to_prepared_ctx]() { - AsyncUpdateThread(exit_flag, grad_to_queue[grad_name], executor, - grad_to_prepared_ctx[grad_name].get()); + AsyncUpdateThread(grad_name, exit_flag, grad_to_queue[grad_name], + executor, grad_to_prepared_ctx[grad_name].get()); })); } + VLOG(3) << "RunAsyncLoop into while"; while (!exit_flag) { const detail::ReceivedMessage v = rpc_service_->Get(); auto recv_var_name = v.first; From 6c88f1ae6e2a517492280a3e355dba6f0fdb10b6 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Fri, 27 Apr 2018 10:14:49 -0700 Subject: [PATCH 12/50] add save op float16 support --- paddle/fluid/operators/save_load_op_test.cc | 32 +++++++++++++++++++++ paddle/fluid/operators/save_op.cc | 21 +++++++++++++- 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index a7ba1e0ae1..0cfb7fb730 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -61,3 +61,35 @@ TEST(SaveLoadOp, CPU) { } } } + +TEST(SaveLoadFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + auto var = scope.Var("test_var"); + auto tensor = var->GetMutable(); + tensor->Resize({3, 10}); + + float* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = 
static_cast(i); + } + + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string("tensor.save")}); + attrs.insert({"save_as_fp16", true}); + + auto save_op = paddle::framework::OpRegistry::CreateOp( + "save", {{"X", {"test_var"}}}, {}, attrs); + save_op->Run(scope, place); + + auto load_var = scope.Var("out_var"); + auto target = load_var->GetMutable(); + auto load_op = paddle::framework::OpRegistry::CreateOp( + "load", {}, {{"Out", {"out_var"}}}, attrs); + load_op->Run(scope, place); + paddle::platform::float16* actual = target->data(); + for (int64_t i = 0; i < tensor->numel(); ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); + } +} diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index 4a715c4baa..f45d07ed90 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -68,6 +69,7 @@ class SaveOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); + auto save_as_fp16 = Attr("save_as_fp16"); if (FileExists(filename) && !overwrite) { PADDLE_THROW("%s is existed, cannot save to it when overwrite=false", @@ -96,7 +98,18 @@ class SaveOp : public framework::OperatorBase { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); - framework::SerializeToStream(fout, tensor, dev_ctx); + auto in_dtype = framework::ToDataType(tensor.type()); + auto out_dtype = save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor out; + framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out); + framework::SerializeToStream(fout, out, dev_ctx); + } else { + framework::SerializeToStream(fout, tensor, dev_ctx); + } } }; @@ -114,6 +127,12 @@ This operator will serialize and write a tensor variable to file on disk. "(boolean, default true)" "Overwrite the output file if exist") .SetDefault(true); + AddAttr("save_as_fp16", + "(boolean, default false)" + "If true, the tensor will be converted to float16 data " + "type and then saved. Otherwise, the tensor will be " + "directly saved without data type conversion.") + .SetDefault(false); AddAttr("file_path", "(string)" "The \"file_path\" where the variable will be saved.") From efba1c7dcb46ededcf1e2b70b3955b1e83a02359 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Fri, 27 Apr 2018 11:48:50 -0700 Subject: [PATCH 13/50] address comments --- paddle/fluid/operators/save_load_op_test.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index 0cfb7fb730..74385ee475 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" USE_NO_KERNEL_OP(save); USE_NO_KERNEL_OP(load); @@ -72,7 +73,7 @@ TEST(SaveLoadFP16Op, CPU) { float* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { - expect[i] = static_cast(i); + expect[i] = static_cast(paddle::platform::float16(i)); } paddle::framework::AttributeMap attrs; From 3948b58b6e1637c7009f56ebc67e3d17577764ed Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 28 Apr 2018 11:04:56 +0800 Subject: [PATCH 14/50] Add unittest of cross entropy. It is not stable on CUDA --- .../tests/unittests/test_cross_entropy_op.py | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index c5b9e92d69..c8e5bd1a8f 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -15,6 +15,7 @@ import unittest import numpy as np from op_test import OpTest, randomize_probability +import paddle.fluid as fluid class TestCrossEntropyOp1(OpTest): @@ -105,5 +106,60 @@ class TestCrossEntropyOp3(OpTest): ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) +class TestCrossEntropyStable(unittest.TestCase): + def main(self, place): + if isinstance( + place, + fluid.CUDAPlace) and not fluid.core.is_compiled_with_cuda(): + return + + class DataRandom(object): + def __init__(self): + self.random = np.random.RandomState(seed=1) + + def next(self): + return { + 'input': self.random.uniform( + low=-1, high=1, size=(64, 200)).astype('float32'), + 'label': self.random.uniform( + low=0, high=10000, size=(64, 1)).astype('int64'), + } + + losses = [] + for _ in xrange(2): + startup = fluid.Program() + startup.random_seed = 1 + main = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(main, startup): + img = fluid.layers.data('input', shape=[200]) + label = fluid.layers.data('label', shape=[1], dtype='int64') + prediction = fluid.layers.fc(input=img, + size=10000, + act='softmax') + xe = fluid.layers.cross_entropy( + input=prediction, label=label) + loss = fluid.layers.mean(xe) + adam = fluid.optimizer.Adam() + adam.minimize(loss) + + exe = fluid.Executor(place) + exe.run(startup) + data = DataRandom() + for i in xrange(1000): + exe.run(feed=next(data)) + losses.append( + exe.run(feed=next(data), fetch_list=[loss])[0]) + print losses + self.assertAlmostEqual(losses[0][0], losses[1][0]) + + def test_cpu(self): + self.main(fluid.CPUPlace()) + + def test_cuda(self): + self.main(fluid.CUDAPlace(0)) + + if __name__ == "__main__": unittest.main() From 76174ec0e9e81ebb049663dc9abf534a241dc143 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 28 Apr 2018 12:49:38 +0800 Subject: [PATCH 15/50] Clean cross entropy and add sync in executor --- paddle/fluid/framework/executor.cc | 3 + paddle/fluid/operators/cross_entropy_op.cc | 10 +- paddle/fluid/operators/cross_entropy_op.cu | 99 +-------------- paddle/fluid/operators/cross_entropy_op.h | 117 ++++++++++++------ .../tests/unittests/test_cross_entropy_op.py | 55 -------- 5 files changed, 92 insertions(+), 192 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 766bf0ab0c..b719568c65 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -348,6 +348,9 @@ void 
Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } } } + + platform::DeviceContextPool::Instance().Get(place_)->Wait(); + if (create_vars && create_local_scope) { scope->DeleteScope(local_scope); } diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index 0e0622e290..2b2a9dc831 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -164,11 +164,13 @@ or not. But the output only shares the LoD information with input X. } // namespace paddle namespace ops = paddle::operators; +using CPUCtx = paddle::platform::CPUDeviceContext; + REGISTER_OPERATOR(cross_entropy, ops::CrossEntropyOp, ops::CrossEntropyOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp); -REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel, - ops::CrossEntropyOpKernel); +REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel, + ops::CrossEntropyOpKernel); REGISTER_OP_CPU_KERNEL(cross_entropy_grad, - ops::CrossEntropyGradientOpKernel, - ops::CrossEntropyGradientOpKernel); + ops::CrossEntropyGradientOpKernel, + ops::CrossEntropyGradientOpKernel); diff --git a/paddle/fluid/operators/cross_entropy_op.cu b/paddle/fluid/operators/cross_entropy_op.cu index 6449149d4b..30dbd5bd3d 100644 --- a/paddle/fluid/operators/cross_entropy_op.cu +++ b/paddle/fluid/operators/cross_entropy_op.cu @@ -14,98 +14,11 @@ limitations under the License. */ #include "paddle/fluid/operators/cross_entropy_op.h" -namespace paddle { -namespace operators { - -namespace { - -template -__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X, - const int64_t* label, const int N, - const int D) { - for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; - i += blockDim.x * gridDim.x) { - int idx = i * D + label[i]; - dX[idx] = -dY[i] / X[idx]; - } -} - -template -__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X, - const T* label, const int N, - const int D) { - int ids = blockIdx.x * blockDim.x + threadIdx.x; - if (ids < N * D) { - int row_ids = ids / D; - dX[ids] = -label[ids] * dY[row_ids] / X[ids]; - } -} -} // namespace - -template -class CrossEntropyOpCUDAKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "This kernel only runs on GPU device."); - const Tensor* x = ctx.Input("X"); - const Tensor* label = ctx.Input("Label"); - Tensor* y = ctx.Output("Y"); - y->mutable_data(ctx.GetPlace()); - - math::CrossEntropyFunctor()( - ctx.template device_context(), y, x, label, - ctx.Attr("soft_label")); - } -}; - -template -class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "This kernel only runs on GPU device."); - - const Tensor* x = ctx.Input("X"); - const Tensor* label = ctx.Input("Label"); - Tensor* dx = ctx.Output(framework::GradVarName("X")); - dx->mutable_data(ctx.GetPlace()); - - const T* dy_data = - ctx.Input(framework::GradVarName("Y"))->data(); - T* dx_data = dx->mutable_data(ctx.GetPlace()); - const T* x_data = x->data(); - - int64_t batch_size = x->dims()[0]; - int64_t class_num = x->dims()[1]; - - int block = 512; - int grid = (batch_size * class_num + block - 1) / block; - - auto& dev_ctx = ctx.template 
device_context(); - auto stream = dev_ctx.stream(); - - if (ctx.Attr("soft_label")) { - auto* label_data = label->data(); - SoftCrossEntropyGradientKernel<<>>( - dx_data, dy_data, x_data, label_data, batch_size, class_num); - } else { - math::SetConstant functor; - functor(dev_ctx, dx, 0); - auto* label_data = label->data(); - grid = (batch_size + block - 1) / block; - CrossEntropyGradientKernel<<>>( - dx_data, dy_data, x_data, label_data, batch_size, class_num); - } - } -}; - -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel, - ops::CrossEntropyOpCUDAKernel); +using CUDACtx = paddle::platform::CUDADeviceContext; +REGISTER_OP_CUDA_KERNEL(cross_entropy, + ops::CrossEntropyOpKernel, + ops::CrossEntropyOpKernel); REGISTER_OP_CUDA_KERNEL(cross_entropy_grad, - ops::CrossEntropyGradientOpCUDAKernel, - ops::CrossEntropyGradientOpCUDAKernel); + ops::CrossEntropyGradientOpKernel, + ops::CrossEntropyGradientOpKernel); diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h index 6da3a24dc8..822a83712d 100644 --- a/paddle/fluid/operators/cross_entropy_op.h +++ b/paddle/fluid/operators/cross_entropy_op.h @@ -17,69 +17,106 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; -template -using EigenMatrix = framework::EigenMatrix; -template +template class CrossEntropyOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "This kernel only runs on CPU."); - const Tensor* x = ctx.Input("X"); - const Tensor* labels = ctx.Input("Label"); - Tensor* y = ctx.Output("Y"); + auto* x = ctx.Input("X"); + auto* labels = ctx.Input("Label"); + auto* y = ctx.Output("Y"); y->mutable_data(ctx.GetPlace()); - math::CrossEntropyFunctor()( - ctx.template device_context(), y, x, labels, + math::CrossEntropyFunctor()( + ctx.template device_context(), y, x, labels, ctx.Attr("soft_label")); } }; template +class XeSoftlabelGradFunctor { + public: + XeSoftlabelGradFunctor(T* dx, + const T* dy, // NOLINT + const T* x, // NOLINT + const T* label, // NOLINT + size_t num_classes) + : dx_(dx), dy_(dy), x_(x), label_(label), num_classes_(num_classes) {} + + HOSTDEVICE void operator()(size_t i) { + auto row_ids = i / num_classes_; + dx_[i] = -label_[i] * dy_[row_ids] / x_[i]; + } + + private: + T* dx_; + const T* dy_; + const T* x_; + const T* label_; + size_t num_classes_; +}; + +template +class XeGradFunctor { + public: + XeGradFunctor(T* dx, + const T* dy, // NOLINT + const T* x, // NOLINT + const int64_t* label, // NOLINT + size_t num_classes) + : dx_(dx), dy_(dy), x_(x), label_(label), num_classes_(num_classes) {} + + HOSTDEVICE void operator()(size_t label_id) { + auto x_is_true_offset = label_id * num_classes_ + label_[label_id]; + for (size_t x_offset = label_id * num_classes_; + x_offset < (label_id + 1) * num_classes_; ++x_offset) { + dx_[x_offset] = x_offset != x_is_true_offset + ? 
static_cast(0) + : -dy_[label_id] / x_[x_offset]; + } + } + + private: + T* dx_; + const T* dy_; + const T* x_; + const int64_t* label_; + size_t num_classes_; +}; + +template class CrossEntropyGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "This kernel only runs on CPU."); - const Tensor* x = ctx.Input("X"); - const Tensor* dy = ctx.Input(framework::GradVarName("Y")); - const Tensor* label = ctx.Input("Label"); - Tensor* dx = ctx.Output(framework::GradVarName("X")); - T* dx_data = dx->mutable_data(ctx.GetPlace()); + auto* x = ctx.Input("X"); + auto* dy = ctx.Input(framework::GradVarName("Y")); + auto* label = ctx.Input("Label"); + auto* dx = ctx.Output(framework::GradVarName("X")); + auto* dx_data = dx->mutable_data(ctx.GetPlace()); int64_t class_num = x->dims()[1]; if (ctx.Attr("soft_label")) { - auto x_mat = EigenMatrix::From(*x); - auto dy_mat = EigenMatrix::From(*dy); - auto lbl_mat = EigenMatrix::From(*label); - auto dx_mat = EigenMatrix::From(*dx); - - dx_mat.device(*ctx.template device_context() - .eigen_device()) = - -(lbl_mat * - dy_mat.broadcast(Eigen::DSizes(1, class_num)) / x_mat); + XeSoftlabelGradFunctor functor(dx_data, dy->data(), x->data(), + label->data(), + static_cast(class_num)); + platform::ForRange for_range( + ctx.template device_context(), + static_cast(dx->numel())); + for_range(functor); } else { - int64_t batch_size = x->dims()[0]; - const T* dy_data = dy->data(); - const T* x_data = x->data(); - const int64_t* label_data = label->data(); - - math::SetConstant functor; - functor(ctx.template device_context(), dx, 0); - - for (int64_t i = 0; i < batch_size; ++i) { - PADDLE_ASSERT(label_data[i] >= 0 || label_data[i] < class_num); - int64_t index = i * class_num + label_data[i]; - dx_data[index] = math::TolerableValue()(-dy_data[i] / x_data[index]); - } + XeGradFunctor functor(dx_data, dy->data(), x->data(), + label->data(), + static_cast(class_num)); + platform::ForRange for_range( + ctx.template device_context(), + static_cast(dy->numel())); + for_range(functor); } } }; diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index c8e5bd1a8f..25dde7b334 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -106,60 +106,5 @@ class TestCrossEntropyOp3(OpTest): ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) -class TestCrossEntropyStable(unittest.TestCase): - def main(self, place): - if isinstance( - place, - fluid.CUDAPlace) and not fluid.core.is_compiled_with_cuda(): - return - - class DataRandom(object): - def __init__(self): - self.random = np.random.RandomState(seed=1) - - def next(self): - return { - 'input': self.random.uniform( - low=-1, high=1, size=(64, 200)).astype('float32'), - 'label': self.random.uniform( - low=0, high=10000, size=(64, 1)).astype('int64'), - } - - losses = [] - for _ in xrange(2): - startup = fluid.Program() - startup.random_seed = 1 - main = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(main, startup): - img = fluid.layers.data('input', shape=[200]) - label = fluid.layers.data('label', shape=[1], dtype='int64') - prediction = fluid.layers.fc(input=img, - size=10000, - act='softmax') - xe = fluid.layers.cross_entropy( - input=prediction, label=label) - loss = 
fluid.layers.mean(xe) - adam = fluid.optimizer.Adam() - adam.minimize(loss) - - exe = fluid.Executor(place) - exe.run(startup) - data = DataRandom() - for i in xrange(1000): - exe.run(feed=next(data)) - losses.append( - exe.run(feed=next(data), fetch_list=[loss])[0]) - print losses - self.assertAlmostEqual(losses[0][0], losses[1][0]) - - def test_cpu(self): - self.main(fluid.CPUPlace()) - - def test_cuda(self): - self.main(fluid.CUDAPlace(0)) - - if __name__ == "__main__": unittest.main() From 6c184104873f2e6137434c148f51a9f8f94b6ada Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 28 Apr 2018 12:55:53 +0800 Subject: [PATCH 16/50] Revert code to develop --- paddle/fluid/framework/executor.cc | 3 --- python/paddle/fluid/tests/unittests/test_cross_entropy_op.py | 1 - 2 files changed, 4 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index b719568c65..766bf0ab0c 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -348,9 +348,6 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } } } - - platform::DeviceContextPool::Instance().Get(place_)->Wait(); - if (create_vars && create_local_scope) { scope->DeleteScope(local_scope); } diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index 25dde7b334..c5b9e92d69 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -15,7 +15,6 @@ import unittest import numpy as np from op_test import OpTest, randomize_probability -import paddle.fluid as fluid class TestCrossEntropyOp1(OpTest): From c0ac0cd6b37732690168e8fa311242fd25f095bd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sat, 28 Apr 2018 14:07:00 +0800 Subject: [PATCH 17/50] Complete rename --- .../framework/details/multi_devices_graph_builder.cc | 8 ++++---- .../fluid/framework/details/multi_devices_graph_builder.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index c2eb1c31b4..725dc57b04 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -45,7 +45,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::vector &places, const std::string &loss_var_name, const std::unordered_set ¶ms, - const std::vector &local_scopes, bool skip_scale_loss) + const std::vector &local_scopes, bool use_default_grad_scale) : loss_var_name_(loss_var_name), places_(places), local_scopes_(local_scopes) { @@ -53,7 +53,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( for (auto &p : params) { grad_names_.insert(GradVarName(p)); } - skip_scale_loss_ = skip_scale_loss; + use_default_grad_scale_ = use_default_grad_scale; } void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, @@ -126,8 +126,8 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( } else if (IsDistTrainOp(*op, send_op)) { CreateComputationalOps(&result, *op, 1); } else if (IsScaleLossOp(*op)) { - // user can customize loss@grad if skip_scale_loss_ - if (!skip_scale_loss_) { + // user can customize loss@grad if not use_default_grad_scale_ + if (use_default_grad_scale_) { CreateScaleLossGradOp(&result); } is_forwarding = false; diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h 
b/paddle/fluid/framework/details/multi_devices_graph_builder.h index fa4d31bdc4..bad47458ef 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -41,7 +41,7 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { const std::string &loss_var_name, const std::unordered_set<std::string> &params, const std::vector<Scope *> &local_scopes, - bool skip_scale_loss); + bool use_default_grad_scale); #endif std::unique_ptr<SSAGraph> Build(const ProgramDesc &program) const override; @@ -59,7 +59,7 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { #ifdef PADDLE_WITH_CUDA platform::NCCLContextMap *nccl_ctxs_; #endif - bool skip_scale_loss_; + bool use_default_grad_scale_; bool IsScaleLossOp(const OpDesc &op) const; From 13fac4232a3d24c39babbe396ba71fe9186f1fe5 Mon Sep 17 00:00:00 2001 From: yangyaming Date: Sat, 28 Apr 2018 16:40:05 +0800 Subject: [PATCH 18/50] Fix to pass CI. --- paddle/fluid/framework/details/multi_devices_graph_builder.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 725dc57b04..daba9bf2dd 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -34,7 +34,7 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::vector<platform::Place> &places, const std::string &loss_var_name, const std::unordered_set<std::string> &params, - const std::vector<Scope *> &local_scopes, bool skip_scale_loss, + const std::vector<Scope *> &local_scopes, bool use_default_grad_scale, platform::NCCLContextMap *nccl_ctxs) : loss_var_name_(loss_var_name), places_(places), From ef48f3c7665b1142fc04dbfeb6aee04ebf4c2c45 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Sat, 28 Apr 2018 17:43:30 +0800 Subject: [PATCH 19/50] wip --- paddle/fluid/operators/detail/grpc_server.cc | 6 ++++++ paddle/fluid/operators/detail/grpc_server.h | 7 ++++++- paddle/fluid/operators/listen_and_serv_op.cc | 19 ++++++++++++++++++- paddle/fluid/operators/listen_and_serv_op.h | 6 ++++++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 95f4738b4f..92819ff958 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -241,6 +241,12 @@ void AsyncGRPCServer::RunSyncUpdate() { t_prefetch_.reset(new std::thread( std::bind(&AsyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), "cq_prefetch", prefetch_register))); + + { + std::lock_guard<std::mutex> lock(this->mutex_ready_); + ready_ = 1; + } + condition_ready_.notify_all(); // wait server server_->Wait(); t_send_->join(); diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 99b87b8c6c..d7c06fc181 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -45,8 +45,9 @@ class RequestBase; class AsyncGRPCServer final { public: explicit AsyncGRPCServer(const std::string &address, bool sync_mode) - : address_(address), sync_mode_(sync_mode) {} + : address_(address), sync_mode_(sync_mode), ready_(0) {} + bool WaitServerReady(); void RunSyncUpdate(); // functions to sync server barrier status.
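This wip patch and the follow-up below replace the fixed sleep(5) in ListenAndServOp with an explicit handshake: the serving thread flips a ready flag under a mutex and signals a condition variable that WaitServerReady() blocks on. A minimal, self-contained sketch of that handshake, with Server standing in for AsyncGRPCServer and the gRPC setup elided:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class Server {
 public:
  // Called from the launching thread; blocks until Run() publishes readiness.
  void WaitServerReady() {
    std::unique_lock<std::mutex> lock(mutex_ready_);
    condition_ready_.wait(lock, [&] { return ready_ == 1; });
  }

  // Runs on the server thread: finish setup, publish the flag, then serve.
  void Run() {
    // ... build the RPC service and start its completion-queue threads ...
    {
      std::lock_guard<std::mutex> lock(mutex_ready_);
      ready_ = 1;
    }
    condition_ready_.notify_all();
    // ... block here the way server_->Wait() does ...
  }

 private:
  std::mutex mutex_ready_;
  std::condition_variable condition_ready_;
  int ready_ = 0;
};

int main() {
  Server server;
  std::thread server_thread([&] { server.Run(); });
  server.WaitServerReady();  // replaces the old sleep-and-hope wait
  std::cout << "server is ready" << std::endl;
  server_thread.join();
  return 0;
}

The predicate form of wait() re-checks ready_ under the lock, so the handshake tolerates spurious wakeups and a notification that fires before the waiter arrives; a bare wait()/notify pair would not.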
@@ -118,6 +119,10 @@ class AsyncGRPCServer final { framework::ProgramDesc *program_; framework::Executor *executor_; int selected_port_; + + std::mutex mutex_ready_; + std::condition_variable condition_ready_; + int ready_; }; }; // namespace detail diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 57cff680ab..0a4b6a08e5 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -265,6 +265,23 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, } // while(true) } +void ListenAndServOp::StartServerThread() { + server_thread_.reset(new std::thread( + std::bind(&ListenAndServOp::ServerThreadEntry, this, rpc_service_))); +} + +void ListenAndServOp::ServerThreadEntry( + std::shared_ptr<detail::AsyncGRPCServer> service) { + service->RunSyncUpdate(); + VLOG(4) << "RunServer thread end"; + + { + std::lock_guard<std::mutex> lock(this->barrier_mutex_); + barrier_cond_step_ = cond; + } + barrier_condition_.notify_all(); +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); @@ -298,7 +315,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, // start the server listening after all member initialized. server_thread_.reset(new std::thread(RunServer, rpc_service_)); VLOG(3) << "wait server thread to become ready..."; - sleep(5); + // Write to a file of server selected port for python use. SavePort(rpc_service_); if (sync_mode) { diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 3cc0f30477..c85569acdc 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -51,6 +51,10 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; + void StartServerThread(); + + void ServerThreadEntry(std::shared_ptr<detail::AsyncGRPCServer> service); + void Stop() override; void RunImpl(const framework::Scope& scope, @@ -59,6 +63,8 @@ protected: mutable std::shared_ptr<detail::AsyncGRPCServer> rpc_service_; mutable std::shared_ptr<std::thread> server_thread_; + std::mutex server_ready_mutex_; + std::condition_variable server_ready_; }; } // namespace operators From 008f6df9b2b150f7cd85d457645f4405fd95d4b1 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Mon, 30 Apr 2018 08:06:30 +0800 Subject: [PATCH 20/50] update --- paddle/fluid/operators/detail/grpc_server.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 92819ff958..ee3b3e3ccb 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -208,6 +208,11 @@ void AsyncGRPCServer::WaitClientGet(int count) { } } +bool AsyncGRPCServer::WaitServerReady() { + std::unique_lock<std::mutex> lock(this->mutex_ready_); + condition_ready_.wait(lock, [&] { return this->ready_ == 1; }); + return true; +} + void AsyncGRPCServer::RunSyncUpdate() { ::grpc::ServerBuilder builder; builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), From f3f889b166bb194fedea2bb1f1e84d27ceb4542f Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Mon, 23 Apr 2018 00:09:09 -0700 Subject: [PATCH 21/50] Add instructions to run vgg --- benchmark/cluster/vgg16/vgg16_fluid.py | 35 ++++++++++++++++++++------ 1 file changed, 27 insertions(+), 8 deletions(-) diff --git
a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 8b29227cfa..12c7394808 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -11,7 +11,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""VGG16 benchmark in Fluid""" +"""VGG16 benchmark in Fluid + +# Single trainer, single PS on a single machine. +VGG_SRC="${CODE_DIR}/vgg16_fluid.py" +export TRAINING_ROLE=PSERVER +export TRAINERS=1 +export POD_IP=127.0.0.1 +export PADDLE_INIT_PORT=6174 +MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 & +sleep 10 # wait for PS to start. +export TRAINING_ROLE=TRAINER +MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU & + +# To run multiple trainers on a single machine +# change TRAINERS=2 and launch 2 trainers. +# CUDA_VISIBLE_DEVICES=4 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=0 & +# CUDA_VISIBLE_DEVICES=5 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=1 & +""" + from __future__ import print_function import sys @@ -200,18 +218,19 @@ def main(): num_samples += len(data) train_pass_acc.add(value=acc, weight=b_size) print( - "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed = %.2f img/s" - % (pass_id, iters, loss, acc, - len(data) / (time.time() - ts)) + "Task:%d Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, " + "Speed = %.2f img/s " % (args.task_index, pass_id, iters, + loss, acc, + len(data) / (time.time() - ts)) ) # The accuracy is the accumulation of batches, but not the current batch. pass_elapsed = time.time() - start_time pass_train_acc = train_pass_acc.eval() pass_test_acc = test(exe) - print( - "Pass = %d, Training performance = %f imgs/s, Train accuracy = %f, Test accuracy = %f\n" - % (pass_id, num_samples / pass_elapsed, pass_train_acc, - pass_test_acc)) + print("Task:%d Pass = %d, Training performance = %f imgs/s, " + "Train accuracy = %f, Test accuracy = %f\n" % + (args.task_index, pass_id, num_samples / pass_elapsed, + pass_train_acc, pass_test_acc)) if args.local: # Parameter initialization From f0917740bf95b9328a19fb78dbcbbd8ab8f0db62 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 1 May 2018 17:37:25 +0800 Subject: [PATCH 22/50] follow comments --- benchmark/cluster/vgg16/run_vgg_dist.sh | 21 +++++++++++++++++++++ benchmark/cluster/vgg16/vgg16_fluid.py | 20 +------------------- 2 files changed, 22 insertions(+), 19 deletions(-) create mode 100644 benchmark/cluster/vgg16/run_vgg_dist.sh diff --git a/benchmark/cluster/vgg16/run_vgg_dist.sh b/benchmark/cluster/vgg16/run_vgg_dist.sh new file mode 100644 index 0000000000..8c0501439e --- /dev/null +++ b/benchmark/cluster/vgg16/run_vgg_dist.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Update to point to the source file. +VGG_SRC="vgg16_fluid.py" + +export TRAINING_ROLE=PSERVER +export TRAINERS=2 +export POD_IP=127.0.0.1 +export PADDLE_INIT_PORT=6174 +MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 & + +# Need to wait for the ps to start first. 
+sleep 10 +echo "done start ps" + +export TRAINING_ROLE=TRAINER +export TRAINERS=2 +export POD_IP=127.0.0.1 +export PADDLE_INIT_PORT=6174 +CUDA_VISIBLE_DEVICES=4 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=0 & +CUDA_VISIBLE_DEVICES=5 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=1 & diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 12c7394808..6c47f6535c 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -11,25 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""VGG16 benchmark in Fluid - -# Single trainer, single PS on a single machine. -VGG_SRC="${CODE_DIR}/vgg16_fluid.py" -export TRAINING_ROLE=PSERVER -export TRAINERS=1 -export POD_IP=127.0.0.1 -export PADDLE_INIT_PORT=6174 -MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 & -sleep 10 # wait for PS to start. -export TRAINING_ROLE=TRAINER -MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU & - -# To run multiple trainers on a single machine -# change TRAINERS=2 and launch 2 trainers. -# CUDA_VISIBLE_DEVICES=4 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=0 & -# CUDA_VISIBLE_DEVICES=5 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=1 & -""" - +"""VGG16 benchmark in Fluid""" from __future__ import print_function import sys From 3eef539a4238baca093cabab84561c71ef6e039b Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Mon, 30 Apr 2018 16:23:00 -0700 Subject: [PATCH 23/50] add word2vec test for the new API --- .../book/word2vec/no_test_word2vec_new_api.py | 146 ++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py diff --git a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py new file mode 100644 index 0000000000..1e31824aa1 --- /dev/null +++ b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py @@ -0,0 +1,146 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle +import paddle.fluid as fluid +import numpy as np +import math +import sys +from functools import partial + +PASS_NUM = 100 +EMBED_SIZE = 32 +HIDDEN_SIZE = 256 +N = 5 +BATCH_SIZE = 32 + + +def create_random_lodtensor(lod, place, low, high): + # The range of data elements is [low, high] + data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") + res = fluid.LoDTensor() + res.set(data, place) + res.set_lod([lod]) + return res + + +word_dict = paddle.dataset.imikolov.build_dict() +dict_size = len(word_dict) + + +def inference_network(is_sparse): + first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') + second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') + third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') + forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') + + embed_first = fluid.layers.embedding( + input=first_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_second = fluid.layers.embedding( + input=second_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_third = fluid.layers.embedding( + input=third_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + embed_forth = fluid.layers.embedding( + input=forth_word, + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=is_sparse, + param_attr='shared_w') + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_forth], axis=1) + hidden1 = fluid.layers.fc(input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid') + predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') + return predict_word + + +def train_network(): + next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') + predict_word = inference_network() + cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) + avg_cost = fluid.layers.mean(cost) + return avg_cost + + +def train(use_cuda, is_sparse, save_path): + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + def event_handler(event): + if isinstance(event, fluid.EndPass): + avg_cost = trainer.test(reader=paddle.dataset.imikolov.test( + word_dict, N)) + + if avg_cost < 5.0: + trainer.params.save(save_path) + return + if math.isnan(avg_cost): + sys.exit("got NaN loss, training failed.") + + trainer = fluid.Trainer( + partial(inference_network, is_sparse), + optimizer=fluid.optimizer.SGD(learning_rate=0.001), + place=place, + event_handler=event_handler) + trainer.train(train_reader, 100) + + +def infer(use_cuda, save_path): + params = fluid.Params(save_path) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + inferencer = fluid.Inferencer(inference_network, params, place=place) + + lod = [0, 1] + first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) + result = inferencer.infer({ + 'firstw': first_word, + 'secondw': second_word, + 'thirdw': third_word, + 'forthw': fourth_word + }) + print(result) + + +def main(use_cuda, is_sparse): + if use_cuda and not 
fluid.core.is_compiled_with_cuda(): + return + + save_path = "word2vec.inference.model" + train(use_cuda, is_sparse, save_path) + infer(use_cuda, save_path) + + +if __name__ == '__main__': + for use_cuda in (False, True): + for is_sparse in (False, True): + main(use_cuda=use_cuda, is_sparse=is_sparse) From a785a837b97c8790c34d16e140df1c4d92b7cf90 Mon Sep 17 00:00:00 2001 From: Helin Wang Date: Tue, 1 May 2018 16:50:38 -0700 Subject: [PATCH 24/50] update the example with the latest API --- .../tests/book/word2vec/no_test_word2vec_new_api.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py index 1e31824aa1..272db7b573 100644 --- a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py @@ -94,7 +94,7 @@ def train(use_cuda, is_sparse, save_path): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() def event_handler(event): - if isinstance(event, fluid.EndPass): + if isinstance(event, fluid.Event.END_EPOCH): avg_cost = trainer.test(reader=paddle.dataset.imikolov.test( word_dict, N)) @@ -106,10 +106,9 @@ def train(use_cuda, is_sparse, save_path): trainer = fluid.Trainer( partial(inference_network, is_sparse), - optimizer=fluid.optimizer.SGD(learning_rate=0.001), - place=place, - event_handler=event_handler) - trainer.train(train_reader, 100) + fluid.optimizer.SGD(learning_rate=0.001), + place=place) + trainer.train(train_reader, 100, event_handler) def infer(use_cuda, save_path): From ea372b3452324c508c837798247f41ee53419cfc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 2 May 2018 10:34:31 +0800 Subject: [PATCH 25/50] add more log --- paddle/fluid/operators/detail/grpc_server.cc | 18 +++++++++++++----- paddle/fluid/operators/listen_and_serv_op.cc | 1 + 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 25c6bb9ee2..8abcfbb438 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -82,7 +82,9 @@ class RequestSend final : public RequestBase { virtual std::string GetReqName() { return request_->Varname(); } virtual void Process() { - queue_->Push(std::make_pair(request_->Varname(), request_)); + std::string var_name = GetReqName(); + VLOG(3) << "RequestSend " << var_name; + queue_->Push(std::make_pair(var_name, request_)); sendrecv::VoidMessage reply; responder_.Finish(reply, ::grpc::Status::OK, this); @@ -106,7 +108,7 @@ class RequestGet final : public RequestBase { responder_(&ctx_), scope_(scope), queue_(queue) { - int method_id = static_cast(detail::GrpcMethod::kGetVariable); + auto method_id = static_cast(detail::GrpcMethod::kGetVariable); service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, cq_, this); } @@ -118,6 +120,7 @@ class RequestGet final : public RequestBase { virtual void Process() { // proc request. 
std::string var_name = request_.varname(); + VLOG(3) << "RequestGet " << var_name; auto* var = scope_->FindVar(var_name); ::grpc::ByteBuffer reply; @@ -176,7 +179,7 @@ class RequestPrefetch final : public RequestBase { ::grpc::ByteBuffer reply; std::string var_name = request_->OutVarname(); - VLOG(3) << "prefetch var " << var_name; + VLOG(3) << "RequestPrefetch " << var_name; auto var_desc = program_->Block(0).FindVar(var_name); framework::Scope* local_scope = &scope_->NewScope(); auto* var = local_scope->FindVar(var_name); @@ -307,18 +310,21 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, bool ok = false; while (true) { - VLOG(3) << "HandleRequest for " << cq_name << " while in"; + VLOG(3) << "HandleRequest for " << cq_name << " wait Next"; if (!cq->Next(&tag, &ok)) { LOG(INFO) << cq_name << " CompletionQueue shutdown!"; break; } - VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; + VLOG(3) << "HandleRequest for " << cq_name << " get Next"; PADDLE_ENFORCE(tag); + if (sync_mode_) { // FIXME(typhoonzero): de-couple the barriers with recv_op + VLOG(3) << "HandleRequest for " << cq_name << " before WaitCond"; if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + VLOG(3) << "HandleRequest for " << cq_name << " after WaitCond"; } RequestBase* base = reinterpret_cast(tag); @@ -353,8 +359,10 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, void AsyncGRPCServer::WaitCond(int cond) { std::unique_lock lock(this->barrier_mutex_); + VLOG(3) << "WaitCond " << cond << " in"; barrier_condition_.wait(lock, [=] { return this->barrier_cond_step_ == cond; }); + VLOG(3) << "WaitCond " << cond << " out"; } void AsyncGRPCServer::SetCond(int cond) { diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 36bd1d3993..e0b828548f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -255,6 +255,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, std::vector> fs; for (auto iter = grad_to_queue.begin(); iter != grad_to_queue.end(); iter++) { std::string grad_name = iter->first; + VLOG(3) << "create async update thread for " << grad_name; fs.push_back(framework::Async([grad_name, &exit_flag, &executor, &grad_to_queue, &grad_to_prepared_ctx]() { AsyncUpdateThread(grad_name, exit_flag, grad_to_queue[grad_name], From 5baae6ee92a0041bc0972fe972ca8d19616350bd Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 2 May 2018 11:02:49 +0800 Subject: [PATCH 26/50] add ccache in Dockerfile to speed up compile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d99d3d182e..164fe84904 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ RUN apt-get update && \ automake locales clang-format swig doxygen cmake \ liblapack-dev liblapacke-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ - net-tools libtool && \ + net-tools libtool ccache && \ apt-get clean -y # Install Go and glide From ebf0027391a657279365be5ead628c6bc8daed37 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 2 May 2018 11:22:27 +0800 Subject: [PATCH 27/50] use IOThreadPool to dispatch async update task --- paddle/fluid/operators/listen_and_serv_op.cc | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 
e0b828548f..f22f8b2610 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -201,14 +201,16 @@ static void AsyncUpdateThread( LOG(ERROR) << "Can not find server side var: " << recv_var_name; PADDLE_THROW("Can not find server side var"); } - try { - executor->RunPreparedContext(prepared, v.second->GetMutableLocalScope(), - false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } + auto fs = framework::Async([var_name, &executor, &v, prepared] { + try { + executor->RunPreparedContext(prepared, v.second->GetMutableLocalScope(), + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + }); + fs.wait(); } - VLOG(3) << "update thread for " << var_name << " ended"; } void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, @@ -256,8 +258,8 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, for (auto iter = grad_to_queue.begin(); iter != grad_to_queue.end(); iter++) { std::string grad_name = iter->first; VLOG(3) << "create async update thread for " << grad_name; - fs.push_back(framework::Async([grad_name, &exit_flag, &executor, - &grad_to_queue, &grad_to_prepared_ctx]() { + fs.push_back(framework::AsyncIO([grad_name, &exit_flag, &executor, + &grad_to_queue, &grad_to_prepared_ctx]() { AsyncUpdateThread(grad_name, exit_flag, grad_to_queue[grad_name], executor, grad_to_prepared_ctx[grad_name].get()); })); From d86626df847a71e89646b5b81ec4b4f6fa1e6c9b Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 2 May 2018 11:40:07 +0800 Subject: [PATCH 28/50] optimize log --- paddle/fluid/operators/detail/grpc_server.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 8abcfbb438..7ca694886e 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -321,7 +321,6 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, if (sync_mode_) { // FIXME(typhoonzero): de-couple the barriers with recv_op - VLOG(3) << "HandleRequest for " << cq_name << " before WaitCond"; if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); VLOG(3) << "HandleRequest for " << cq_name << " after WaitCond"; @@ -359,10 +358,8 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, void AsyncGRPCServer::WaitCond(int cond) { std::unique_lock lock(this->barrier_mutex_); - VLOG(3) << "WaitCond " << cond << " in"; barrier_condition_.wait(lock, [=] { return this->barrier_cond_step_ == cond; }); - VLOG(3) << "WaitCond " << cond << " out"; } void AsyncGRPCServer::SetCond(int cond) { From 90d73c79c3c6645abf1d737cedc43341e94ce45d Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 2 May 2018 11:16:44 +0800 Subject: [PATCH 29/50] fix shfl_sync for CUDA8.0 --- paddle/cuda/include/hl_base.h | 15 +++++++++++++++ paddle/cuda/src/hl_cuda_lstm.cu | 14 +++++++++----- paddle/cuda/src/hl_top_k.cu | 5 ++++- paddle/fluid/platform/cuda_primitives.h | 4 ---- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 6c4f09dacb..b979aa7723 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -228,6 +228,21 @@ extern __thread cudaStream_t default_stream; << "CUDA error: " << hl_get_device_error_string((size_t)err); \ } +// __shfl has been 
deprecated as of CUDA 9.0. +#if CUDA_VERSION < 9000 +template +__forceinline__ __device__ T +__shfl_sync(unsigned, T val, int src_line, int width) { + return __shfl(val, src_line, width); +} + +#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; +#else +#define FULL_WARP_MASK 0xFFFFFFFF +#define CREATE_SHFL_MASK(mask, predicate) \ + mask = __ballot_sync(FULL_WARP_MASK, (predicate)) +#endif + #endif /* __NVCC__ */ #endif /* HL_BASE_H_ */ diff --git a/paddle/cuda/src/hl_cuda_lstm.cu b/paddle/cuda/src/hl_cuda_lstm.cu index 38371366f8..e30fcddffd 100644 --- a/paddle/cuda/src/hl_cuda_lstm.cu +++ b/paddle/cuda/src/hl_cuda_lstm.cu @@ -341,12 +341,15 @@ void hl_lstm_parallel_forward(real *gateValue, } __device__ __forceinline__ void transpose_32x32(real a[], const int idx) { - int addr = idx % 32; + const int warp_size = 32; + int addr = idx % warp_size; + unsigned mask = 0u; + CREATE_SHFL_MASK(mask, addr < warp_size); #pragma unroll for (int k = 1; k < 32; k++) { // rSrc[k] = __shfl_sync(rSrc[k], (threadIdx.x + k) % 32, 32); - addr = __shfl_sync(addr, (idx + 1) % 32, 32); - a[k] = __shfl_sync(a[k], addr, 32); + addr = __shfl_sync(mask, addr, (idx + 1) % 32, 32); + a[k] = __shfl_sync(mask, a[k], addr, 32); } #pragma unroll @@ -360,10 +363,11 @@ __device__ __forceinline__ void transpose_32x32(real a[], const int idx) { } addr = (32 - idx) % 32; + CREATE_SHFL_MASK(mask, idx % 32 < warp_size); #pragma unroll for (int k = 0; k < 32; k++) { - a[k] = __shfl_sync(a[k], addr, 32); - addr = __shfl_sync(addr, (idx + 31) % 32, 32); + a[k] = __shfl_sync(mask, a[k], addr, 32); + addr = __shfl_sync(mask, addr, (idx + 31) % 32, 32); } } diff --git a/paddle/cuda/src/hl_top_k.cu b/paddle/cuda/src/hl_top_k.cu index 94c9cceb2c..59ba552f56 100644 --- a/paddle/cuda/src/hl_top_k.cu +++ b/paddle/cuda/src/hl_top_k.cu @@ -244,13 +244,16 @@ __device__ __forceinline__ void blockReduce(Pair* shTopK, if (--beamSize == 0) break; __syncthreads(); + unsigned mask = 0u; + // CREATE_SHFL_MASK(mask, tid < len); + if (tid == maxId[0]) { if (beam < maxLength) { shTopK[tid] = topK[beam]; } } if (maxId[0] / 32 == warp) { - if (__shfl_sync(beam, (maxId[0]) % 32, 32) == maxLength) break; + if (__shfl_sync(mask, beam, (maxId[0]) % 32, 32) == maxLength) break; } } } diff --git a/paddle/fluid/platform/cuda_primitives.h b/paddle/fluid/platform/cuda_primitives.h index 46b97043ab..866ff30a8b 100644 --- a/paddle/fluid/platform/cuda_primitives.h +++ b/paddle/fluid/platform/cuda_primitives.h @@ -74,10 +74,6 @@ __forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { } #define CREATE_SHFL_MASK(mask, predicate) mask = 0u; #else -template -__forceinline__ __device__ T __shfl_down_sync(unsigned mask, T val, int delta) { - return __shfl_down(mask, val, delta); -} #define FULL_WARP_MASK 0xFFFFFFFF #define CREATE_SHFL_MASK(mask, predicate) \ mask = __ballot_sync(FULL_WARP_MASK, (predicate)) From d11b8e56e546f465256684ecc362b1e6bca9033a Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Sat, 28 Apr 2018 02:50:44 +0000 Subject: [PATCH 30/50] fix --- paddle/fluid/pybind/tensor_py.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index dcd711a33f..93b09ed692 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -107,7 +107,7 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) { return self.data()[offset]; } else { std::shared_ptr dst(new framework::Tensor); - framework::TensorCopy(self, 
platform::CPUPlace(), dst.get()); + framework::TensorCopySync(self, platform::CPUPlace(), dst.get()); return dst->data<T>()[offset]; } } @@ -117,9 +117,9 @@ template <typename T> void TensorSetElement(framework::Tensor *self, size_t offset, T elem) { if (platform::is_gpu_place(self->place())) { std::shared_ptr<framework::Tensor> dst(new framework::Tensor); - framework::TensorCopy(*self, platform::CPUPlace(), dst.get()); + framework::TensorCopySync(*self, platform::CPUPlace(), dst.get()); dst->data<T>()[offset] = elem; - framework::TensorCopy(*dst.get(), self->place(), self); + framework::TensorCopySync(*dst.get(), self->place(), self); } else if (platform::is_cpu_place(self->place())) { self->data<T>()[offset] = elem; From a1a401eb2639d29d6c78fa61db37fb9341dbe709 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Sat, 28 Apr 2018 07:12:55 +0000 Subject: [PATCH 31/50] fix --- paddle/fluid/operators/send_recv_op_test.cc | 34 +++++++++++++-------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index d2e1f3cb2f..93e55d4103 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -113,7 +113,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs, op->SetAttrMap(attrs); } -void StartServerNet(bool is_sparse) { +void StartServerNet(bool is_sparse, std::atomic<bool> *initialized) { f::Scope scope; p::CPUPlace place; if (is_sparse) { InitSelectedRowsInScope(place, &scope); } else { InitTensorsInScope(place, &scope); } - // sub program run in listen_and_serv_op, for simple test we use sum f::ProgramDesc program; const auto &root_block = program.Block(0); auto *optimize_block = program.AppendBlock(root_block); auto *prefetch_block = program.AppendBlock(root_block); // X for server side tensors, RX for received tensors, must be of same shape. AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); - f::AttributeMap attrs; attrs.insert({"endpoint", std::string("127.0.0.1:0")}); attrs.insert({"Fanin", 1}); @@ -141,12 +139,16 @@ attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); + *initialized = true; listen_and_serv_op->Run(scope, place); LOG(INFO) << "server exit"; } TEST(SendRecvOp, CPUDense) { - std::thread server_thread(StartServerNet, false); + std::atomic<bool> initialized{false}; + std::thread server_thread(StartServerNet, false, &initialized); + while (!initialized) { + } sleep(5); // wait server to start // local net f::Scope scope; p::CPUPlace place; InitTensorsInScope(place, &scope); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - selected_port = static_cast<paddle::operators::ListenAndServOp *>( - listen_and_serv_op.get()) - ->GetSelectedPort(); + auto *listen_and_serv_op_ptr = + static_cast<paddle::operators::ListenAndServOp *>( + listen_and_serv_op.get()); + ASSERT_TRUE(listen_and_serv_op_ptr != nullptr); + selected_port = listen_and_serv_op_ptr->GetSelectedPort(); std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector<std::string>({endpoint})}); attrs.insert({"epmap", std::vector<std::string>({endpoint})}); @@ -184,8 +188,12 @@ } TEST(SendRecvOp, CPUSparse) { - std::thread server_thread(StartServerNet, true); - sleep(3); // wait server to start + std::atomic<bool> initialized; + initialized = false; + std::thread server_thread(StartServerNet, true, &initialized); + while (!initialized) { + } + sleep(5); // wait server to start // local net f::Scope scope; p::CPUPlace place; InitSelectedRowsInScope(place, &scope); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - selected_port = static_cast<paddle::operators::ListenAndServOp *>( - listen_and_serv_op.get()) - ->GetSelectedPort(); + auto *listen_and_serv_op_ptr = + static_cast<paddle::operators::ListenAndServOp *>( + listen_and_serv_op.get()); + ASSERT_TRUE(listen_and_serv_op_ptr != nullptr); + selected_port = listen_and_serv_op_ptr->GetSelectedPort(); std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector<std::string>({endpoint})}); attrs.insert({"epmap", std::vector<std::string>({endpoint})}); From d946d01e2b72d56ff3f6a61ae85f3da7b26e87b5 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 2 May 2018 07:08:36 +0000 Subject: [PATCH 32/50] follow cpplint --- paddle/cuda/include/hl_base.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index b979aa7723..a42ace5bca 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ -#ifndef HL_BASE_H_ -#define HL_BASE_H_ +#ifndef PADDLE_CUDA_INCLUDE_HL_BASE_H_ +#define PADDLE_CUDA_INCLUDE_HL_BASE_H_ #include <cstddef> @@ -207,8 +207,8 @@ typedef struct { #ifdef __NVCC__ -#include "cuda_runtime.h" -#include "hl_cuda.h" +#include "./cuda_runtime.h" +#include "./hl_cuda.h" #include "paddle/utils/Logging.h" extern __thread bool g_sync_flag; @@ -243,6 +243,6 @@ __shfl_sync(unsigned, T val, int src_line, int width) { mask = __ballot_sync(FULL_WARP_MASK, (predicate)) #endif -#endif /* __NVCC__ */ +#endif // __NVCC__ -#endif /* HL_BASE_H_ */ +#endif // PADDLE_CUDA_INCLUDE_HL_BASE_H_ From 55f0d840291f0fd038328a085f475938b48f7e2b Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Wed, 2 May 2018 00:23:41 -0700 Subject: [PATCH 33/50] Fix Cpplint Issues in fluid/inference/tensorrt/ (#10318) * Fix CPPLint issues in fluid/inference/tensorrt/ * Fix compile errors --- paddle/fluid/inference/tensorrt/engine.h | 2 +- paddle/fluid/inference/tensorrt/helper.h | 10 +++---- .../fluid/inference/tensorrt/test_tensorrt.cc | 26 +++++++++---------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index 82d8c3df4e..d6d4c2f8a2 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -65,7 +65,7 @@ class TensorRTEngine : public EngineBase { // Initialize the inference network, so that TensorRT layers can add to this // network. void InitNetwork() { - infer_builder_.reset(createInferBuilder(logger_)); + infer_builder_.reset(createInferBuilder(&logger_)); infer_network_.reset(infer_builder_->createNetwork()); } // After finishing adding ops, freeze this network and creates the executation diff --git a/paddle/fluid/inference/tensorrt/helper.h b/paddle/fluid/inference/tensorrt/helper.h index 796283d325..2b402cce60 100644 --- a/paddle/fluid/inference/tensorrt/helper.h +++ b/paddle/fluid/inference/tensorrt/helper.h @@ -46,13 +46,13 @@ const int kDataTypeSize[] = { // The following two API are implemented in TensorRT's header file, cannot load // from the dynamic library. So create our own implementation and directly // trigger the method from the dynamic library. -static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) { +static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) { return static_cast<nvinfer1::IBuilder*>( - dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION)); + dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION)); } -static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) { +static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) { return static_cast<nvinfer1::IRuntime*>( - dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION)); + dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION)); } // A logger for create TensorRT infer builder. @@ -80,7 +80,7 @@ class NaiveLogger : public nvinfer1::ILogger { return *x; } - virtual ~NaiveLogger() override {} + ~NaiveLogger() override {} }; } // namespace tensorrt diff --git a/paddle/fluid/inference/tensorrt/test_tensorrt.cc b/paddle/fluid/inference/tensorrt/test_tensorrt.cc index aed5b5e1a2..a075379857 100644 --- a/paddle/fluid/inference/tensorrt/test_tensorrt.cc +++ b/paddle/fluid/inference/tensorrt/test_tensorrt.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ +#include +#include #include #include #include "NvInfer.h" -#include "cuda.h" -#include "cuda_runtime_api.h" #include "paddle/fluid/platform/dynload/tensorrt.h" namespace dy = paddle::platform::dynload; @@ -43,7 +43,7 @@ class Logger : public nvinfer1::ILogger { class ScopedWeights { public: - ScopedWeights(float value) : value_(value) { + explicit ScopedWeights(float value) : value_(value) { w.type = nvinfer1::DataType::kFLOAT; w.values = &value_; w.count = 1; @@ -58,13 +58,13 @@ class ScopedWeights { // The following two API are implemented in TensorRT's header file, cannot load // from the dynamic library. So create our own implementation and directly // trigger the method from the dynamic library. -nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) { +nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger* logger) { return static_cast( - dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION)); + dy::createInferBuilder_INTERNAL(logger, NV_TENSORRT_VERSION)); } -nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) { +nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger* logger) { return static_cast( - dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION)); + dy::createInferRuntime_INTERNAL(logger, NV_TENSORRT_VERSION)); } const char* kInputTensor = "input"; @@ -74,7 +74,7 @@ const char* kOutputTensor = "output"; nvinfer1::IHostMemory* CreateNetwork() { Logger logger; // Create the engine. - nvinfer1::IBuilder* builder = createInferBuilder(logger); + nvinfer1::IBuilder* builder = createInferBuilder(&logger); ScopedWeights weights(2.); ScopedWeights bias(3.); @@ -103,9 +103,9 @@ nvinfer1::IHostMemory* CreateNetwork() { return model; } -void Execute(nvinfer1::IExecutionContext& context, const float* input, +void Execute(nvinfer1::IExecutionContext* context, const float* input, float* output) { - const nvinfer1::ICudaEngine& engine = context.getEngine(); + const nvinfer1::ICudaEngine& engine = context->getEngine(); // Two binds, input and output ASSERT_EQ(engine.getNbBindings(), 2); const int input_index = engine.getBindingIndex(kInputTensor); @@ -119,7 +119,7 @@ void Execute(nvinfer1::IExecutionContext& context, const float* input, // Copy the input to the GPU, execute the network, and copy the output back. ASSERT_EQ(0, cudaMemcpyAsync(buffers[input_index], input, sizeof(float), cudaMemcpyHostToDevice, stream)); - context.enqueue(1, buffers, stream, nullptr); + context->enqueue(1, buffers, stream, nullptr); ASSERT_EQ(0, cudaMemcpyAsync(output, buffers[output_index], sizeof(float), cudaMemcpyDeviceToHost, stream)); cudaStreamSynchronize(stream); @@ -136,7 +136,7 @@ TEST(TensorrtTest, BasicFunction) { // Use the model to create an engine and an execution context. Logger logger; - nvinfer1::IRuntime* runtime = createInferRuntime(logger); + nvinfer1::IRuntime* runtime = createInferRuntime(&logger); nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(model->data(), model->size(), nullptr); model->destroy(); @@ -145,7 +145,7 @@ TEST(TensorrtTest, BasicFunction) { // Execute the network. float input = 1234; float output; - Execute(*context, &input, &output); + Execute(context, &input, &output); EXPECT_EQ(output, input * 2 + 3); // Destroy the engine. 
From ac266dfb8d91ce129ccab35524825833444901ed Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 2 May 2018 07:33:44 +0000 Subject: [PATCH 34/50] follow cpplint --- paddle/cuda/include/hl_base.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index a42ace5bca..9bddf830bb 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#ifndef PADDLE_CUDA_INCLUDE_HL_BASE_H_ -#define PADDLE_CUDA_INCLUDE_HL_BASE_H_ +#pragma once #include @@ -244,5 +243,3 @@ __shfl_sync(unsigned, T val, int src_line, int width) { #endif #endif // __NVCC__ - -#endif // PADDLE_CUDA_INCLUDE_HL_BASE_H_ From 9bcd9f661bad5c26cd70a71b6d3ececdf2ef52f0 Mon Sep 17 00:00:00 2001 From: chengduo Date: Wed, 2 May 2018 15:33:52 +0800 Subject: [PATCH 35/50] fix cpplint error (#10329) --- paddle/fluid/operators/math/pooling.cc | 107 +++++++++++++------------ paddle/fluid/operators/math/pooling.cu | 84 +++++++++++-------- paddle/fluid/operators/math/pooling.h | 83 +++++++++++-------- 3 files changed, 154 insertions(+), 120 deletions(-) diff --git a/paddle/fluid/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc index 97a2e81c84..b871851798 100644 --- a/paddle/fluid/operators/math/pooling.cc +++ b/paddle/fluid/operators/math/pooling.cc @@ -11,8 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "paddle/fluid/operators/math/pooling.h" +#include +#include namespace paddle { namespace operators { @@ -27,9 +28,10 @@ template class Pool2dFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* output) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -63,11 +65,11 @@ class Pool2dFunctor { T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.compute(ele, input_data[h * input_width + w]); + pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, (static_cast(pool_size))); + pool_process.finalize(static_cast(pool_size), &ele); output_data[ph * output_width + pw] = ele; } } @@ -86,13 +88,12 @@ class Pool2dFunctor { template class Pool2dGradFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, - const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + void operator()( + const platform::CPUDeviceContext& context, const framework::Tensor& input, + const framework::Tensor& output, const framework::Tensor& output_grad, + const std::vector& ksize, const std::vector& strides, + const std::vector& paddings, PoolProcess pool_grad_process, + framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -131,8 +132,8 @@ class Pool2dGradFunctor { input_data[h * input_width + w], output_data[ph * output_width + pw], output_grad_data[ph * output_width + pw], - input_grad_data[h * input_width + w], - static_cast(scale)); + static_cast(scale), + input_grad_data + h * input_width + w); } } } @@ -154,12 +155,11 @@ class Pool2dGradFunctor { template class MaxPool2dGradFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, - const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* input_grad) { + void operator()( + const platform::CPUDeviceContext& context, const framework::Tensor& input, + const framework::Tensor& output, const framework::Tensor& output_grad, + const std::vector& ksize, const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -246,9 +246,10 @@ template class Pool3dFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* output) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* output) { const int 
batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -293,14 +294,14 @@ class Pool3dFunctor { for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( - ele, - input_data[(d * input_height + h) * input_width + w]); + input_data[(d * input_height + h) * input_width + w], + &ele); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, static_cast(pool_size)); + pool_process.finalize(static_cast(pool_size), &ele); output_data[output_idx] = ele; } } @@ -320,13 +321,12 @@ class Pool3dFunctor { template class Pool3dGradFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, - const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_grad_process, - framework::Tensor* input_grad) { + void operator()( + const platform::CPUDeviceContext& context, const framework::Tensor& input, + const framework::Tensor& output, const framework::Tensor& output_grad, + const std::vector& ksize, const std::vector& strides, + const std::vector& paddings, PoolProcess pool_grad_process, + framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -379,8 +379,8 @@ class Pool3dGradFunctor { (pd * output_height + ph) * output_width + pw; pool_grad_process.compute( input_data[input_idx], output_data[output_idx], - output_grad_data[output_idx], - input_grad_data[input_idx], static_cast(scale)); + output_grad_data[output_idx], static_cast(scale), + input_grad_data + input_idx); } } } @@ -404,12 +404,11 @@ class Pool3dGradFunctor { template class MaxPool3dGradFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, - const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* input_grad) { + void operator()( + const platform::CPUDeviceContext& context, const framework::Tensor& input, + const framework::Tensor& output, const framework::Tensor& output_grad, + const std::vector& ksize, const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -510,9 +509,10 @@ template class MaxPool2dWithIndexFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; @@ -576,8 +576,9 @@ class MaxPool2dWithIndexGradFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* 
input_grad) { const int batch_size = input_grad->dims()[0]; const int input_height = input_grad->dims()[2]; @@ -628,9 +629,10 @@ template class MaxPool3dWithIndexFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_depth = input.dims()[2]; const int input_height = input.dims()[3]; @@ -708,8 +710,9 @@ class MaxPool3dWithIndexGradFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_depth = input_grad->dims()[2]; diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index 267f8c409d..b1c76350d1 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/fluid/operators/math/pooling.h" #include "paddle/fluid/platform/cuda_primitives.h" @@ -47,11 +49,11 @@ __global__ void KernelPool2D(const int nthreads, const T* input_data, T ele = pool_process.initial(); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - pool_process.compute(ele, input_data[h * input_width + w]); + pool_process.compute(input_data[h * input_width + w], &ele); } } int pool_size = (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, (static_cast(pool_size))); + pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } } @@ -96,8 +98,8 @@ __global__ void KernelPool2DGrad( int pool_size = (hend - hstart) * (wend - wstart); int output_sub_idx = ph * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], - output_grad[output_sub_idx], gradient, - static_cast(1.0 / pool_size)); + output_grad[output_sub_idx], + static_cast(1.0 / pool_size), &gradient); } } input_grad[index] = gradient; @@ -158,9 +160,10 @@ template class Pool2dFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* output) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -201,9 +204,11 @@ class Pool2dGradFunctor { void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* 
input_grad) { + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -246,8 +251,10 @@ class MaxPool2dGradFunctor { void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; @@ -340,12 +347,12 @@ __global__ void KernelPool3D(const int nthreads, const T* input_data, for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { pool_process.compute( - ele, input_data[(d * input_height + h) * input_width + w]); + input_data[(d * input_height + h) * input_width + w], &ele); } } } int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); - pool_process.finalize(ele, static_cast(pool_size)); + pool_process.finalize(static_cast(pool_size), &ele); output_data[index] = ele; } } @@ -405,8 +412,8 @@ __global__ void KernelPool3DGrad( int pool_size = (dend - dstart) * (hend - hstart) * (wend - wstart); int output_sub_idx = (pd * output_height + ph) * output_width + pw; pool_process.compute(input, output_data[output_sub_idx], - output_grad[output_sub_idx], gradient, - static_cast(1.0 / pool_size)); + output_grad[output_sub_idx], + static_cast(1.0 / pool_size), &gradient); } } } @@ -474,9 +481,10 @@ template class Pool3dFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* output) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -525,9 +533,11 @@ class Pool3dGradFunctor { void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_process, framework::Tensor* input_grad) { + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_process, + framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -578,8 +588,10 @@ class MaxPool3dGradFunctor { void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; 
@@ -736,9 +748,10 @@ template class MaxPool2dWithIndexFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; @@ -779,8 +792,9 @@ class MaxPool2dWithIndexGradFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; @@ -937,9 +951,10 @@ template class MaxPool3dWithIndexFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& input, std::vector& ksize, - std::vector& strides, std::vector& paddings, - framework::Tensor* output, framework::Tensor* mask) { + const framework::Tensor& input, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, + framework::Tensor* mask) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_depth = input.dims()[2]; @@ -987,8 +1002,9 @@ class MaxPool3dWithIndexGradFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad) { const int batch_size = input_grad->dims()[0]; const int input_channels = input_grad->dims()[1]; diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h index 74cb42f0d0..2538d739cc 100644 --- a/paddle/fluid/operators/math/pooling.h +++ b/paddle/fluid/operators/math/pooling.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" @@ -23,8 +24,8 @@ namespace operators { namespace math { #define FLT_MAX \ - __FLT_MAX__ // It might need to be placed in another file, but I'm still - // wondering where to put it. + __FLT_MAX__ // TODO(zcd) :It might need to be placed in another file, but I'm + // still wondering where to put it. /* * \brief Extracting simple operations from pooling. @@ -40,33 +41,33 @@ template class MaxPool { public: DEVICE inline T initial() { return static_cast(-FLT_MAX); } - DEVICE inline void compute(T& y, const T& x) { y = y > x ? y : x; } - DEVICE inline void finalize(T& y, const T& pool_field) {} + DEVICE inline void compute(const T& x, T* y) { *y = *y > x ? 
*y : x; } + DEVICE inline void finalize(const T& pool_field, T* y) {} }; template class AvgPool { public: DEVICE inline T initial() { return static_cast(0); } - DEVICE inline void compute(T& y, const T& x) { y += x; } - DEVICE inline void finalize(T& y, const T& pool_field) { y /= pool_field; } + DEVICE inline void compute(const T& x, T* y) { *y += x; } + DEVICE inline void finalize(const T& pool_field, T* y) { *y /= pool_field; } }; template class MaxPoolGrad { public: - DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, - T scale) { - dx += dy * (x == y); + DEVICE inline void compute(const T& x, const T& y, const T& dy, T scale, + T* dx) { + *dx += dy * (x == y); } }; template class AvgPoolGrad { public: - DEVICE inline void compute(const T& x, const T& y, const T& dy, T& dx, - T scale) { - dx += (scale * dy); + DEVICE inline void compute(const T& x, const T& y, const T& dy, T scale, + T* dx) { + *dx += (scale * dy); } }; @@ -88,8 +89,9 @@ template class Pool2dFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_compute, framework::Tensor* output); }; @@ -98,9 +100,11 @@ class Pool2dGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_compute, framework::Tensor* input_grad); + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_compute, + framework::Tensor* input_grad); }; template @@ -108,8 +112,10 @@ class MaxPool2dGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad); }; @@ -117,8 +123,9 @@ template class Pool3dFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - std::vector& ksize, std::vector& strides, - std::vector& paddings, PoolProcess pool_compute, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_compute, framework::Tensor* output); }; @@ -127,9 +134,11 @@ class Pool3dGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, - PoolProcess pool_compute, framework::Tensor* input_grad); + const framework::Tensor& output_grad, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, PoolProcess pool_compute, + framework::Tensor* input_grad); }; template @@ -137,8 +146,10 @@ class MaxPool3dGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, const framework::Tensor& output, - const framework::Tensor& output_grad, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& output_grad, + const 
std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad); }; @@ -153,8 +164,9 @@ template class MaxPool2dWithIndexFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - std::vector& ksize, std::vector& strides, - std::vector& paddings, framework::Tensor* output, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, framework::Tensor* mask); }; @@ -163,8 +175,9 @@ class MaxPool2dWithIndexGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad); }; @@ -172,8 +185,9 @@ template class MaxPool3dWithIndexFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - std::vector& ksize, std::vector& strides, - std::vector& paddings, framework::Tensor* output, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* output, framework::Tensor* mask); }; @@ -182,8 +196,9 @@ class MaxPool3dWithIndexGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& output_grad, - const framework::Tensor& mask, std::vector& ksize, - std::vector& strides, std::vector& paddings, + const framework::Tensor& mask, const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, framework::Tensor* input_grad); }; From 1945b729b6f24ca66ddabb3b9b1ff720e08ef801 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Wed, 2 May 2018 00:34:30 -0700 Subject: [PATCH 36/50] Fix CPPLint issues with math/sequence_padding (#10317) * Fix cpplint issues in sequence_padding * Fix typo in cu file * Fix dependencies of sequence_padding * Add include --- .../fluid/operators/math/sequence_padding.cc | 16 ++++++------ .../fluid/operators/math/sequence_padding.cu | 25 ++++++++++--------- .../fluid/operators/math/sequence_padding.h | 5 ++-- .../operators/math/sequence_padding_test.cc | 4 +-- paddle/fluid/operators/warpctc_op.h | 4 +-- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/paddle/fluid/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc index 38bd3b9975..d63c6c4ed5 100644 --- a/paddle/fluid/operators/math/sequence_padding.cc +++ b/paddle/fluid/operators/math/sequence_padding.cc @@ -22,7 +22,7 @@ template class PaddingLoDTensorFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::LoDTensor& seq, framework::Tensor& padding, + const framework::LoDTensor& seq, framework::Tensor* padding, bool norm_by_times) { auto lod = seq.lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, @@ -37,7 +37,7 @@ class PaddingLoDTensorFunctor { "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); - auto padding_dims = padding.dims(); + auto padding_dims = padding->dims(); PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor of shape " "[max_sequence_length, num_sequences, sequence_width]."); @@ -58,7 +58,7 @@ class PaddingLoDTensorFunctor { "width of sequence in LoDTensor seq."); const T* seq_data = seq.data(); - T* padding_data = padding.data(); + T* padding_data = padding->data(); for (int64_t i = 0; i < 
max_sequence_length; ++i) { for (int64_t j = 0; j < num_sequences; ++j) { int64_t start_pos = abs_offset_lod[level][j]; @@ -84,16 +84,16 @@ template class UnpaddingLoDTensorFunctor { public: void operator()(const platform::CPUDeviceContext& context, - framework::LoDTensor& seq, const framework::Tensor& padding, + framework::LoDTensor* seq, const framework::Tensor& padding, bool norm_by_times) { - auto lod = seq.lod(); + auto lod = seq->lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The LoD of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - auto seq_dims = seq.dims(); + auto seq_dims = seq->dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " @@ -114,13 +114,13 @@ class UnpaddingLoDTensorFunctor { "The second dimension of Tensor padding should be " "the number of sequences in LoDTensor seq."); - const int64_t sequence_width = seq.numel() / seq_dims[0]; + const int64_t sequence_width = seq->numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); const T* padding_data = padding.data(); - T* seq_data = seq.data(); + T* seq_data = seq->data(); for (int64_t i = 0; i < num_sequences; ++i) { int64_t start_pos = abs_offset_lod[level][i]; int64_t sequence_length = abs_offset_lod[level][i + 1] - start_pos; diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu index c044e6fc32..0956a0c17d 100644 --- a/paddle/fluid/operators/math/sequence_padding.cu +++ b/paddle/fluid/operators/math/sequence_padding.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include #include "paddle/fluid/operators/math/sequence_padding.h" namespace paddle { @@ -61,7 +62,7 @@ template class PaddingLoDTensorFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::LoDTensor& seq, framework::Tensor& padding, + const framework::LoDTensor& seq, framework::Tensor* padding, bool norm_by_times) { auto lod = seq.lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, @@ -76,7 +77,7 @@ class PaddingLoDTensorFunctor { "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); - auto padding_dims = padding.dims(); + auto padding_dims = padding->dims(); PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor of shape " "[max_sequence_length, num_sequences, sequence_width]."); @@ -97,8 +98,8 @@ class PaddingLoDTensorFunctor { "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { - TensorCopy(seq, context.GetPlace(), context, &padding); - padding.Resize(padding_dims); + TensorCopy(seq, context.GetPlace(), context, padding); + padding->Resize(padding_dims); return; } @@ -117,7 +118,7 @@ class PaddingLoDTensorFunctor { dim3 grid(grid_dim_x, grid_dim_y); const T* seq_data = seq.data(); - T* padding_data = padding.data(); + T* padding_data = padding->data(); if (norm_by_times) { SequencePaddingKernel<<>>( padding_data, const_cast(seq_data), @@ -136,16 +137,16 @@ template class UnpaddingLoDTensorFunctor { public: void operator()(const platform::CUDADeviceContext& context, - framework::LoDTensor& seq, const framework::Tensor& padding, + framework::LoDTensor* seq, const framework::Tensor& padding, bool norm_by_times) { - auto lod = seq.lod(); + auto lod = seq->lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The lod of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - auto seq_dims = seq.dims(); + auto seq_dims = seq->dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " @@ -166,14 +167,14 @@ class UnpaddingLoDTensorFunctor { "The second dimension of Tensor padding should be " "the number of sequences in LoDTensor seq."); - const int64_t sequence_width = seq.numel() / seq_dims[0]; + const int64_t sequence_width = seq->numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { - TensorCopy(padding, context.GetPlace(), context, &seq); - seq.Resize(seq_dims); + TensorCopy(padding, context.GetPlace(), context, seq); + seq->Resize(seq_dims); return; } @@ -192,7 +193,7 @@ class UnpaddingLoDTensorFunctor { dim3 grid(grid_dim_x, grid_dim_y); const T* padding_data = padding.data(); - T* seq_data = seq.data(); + T* seq_data = seq->data(); if (norm_by_times) { SequencePaddingKernel<<>>( const_cast(padding_data), seq_data, diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h index 17f044b9d6..b56e6db1eb 100644 --- a/paddle/fluid/operators/math/sequence_padding.h +++ b/paddle/fluid/operators/math/sequence_padding.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/device_context.h" @@ -64,13 +65,13 @@ template class PaddingLoDTensorFunctor { public: void operator()(const DeviceContext& context, const framework::LoDTensor& seq, - framework::Tensor& padding, bool norm_by_times); + framework::Tensor* padding, bool norm_by_times); }; template class UnpaddingLoDTensorFunctor { public: - void operator()(const DeviceContext& context, framework::LoDTensor& seq, + void operator()(const DeviceContext& context, framework::LoDTensor* seq, const framework::Tensor& padding, bool norm_by_times); }; diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index e3d6214485..b9a1b9ae4d 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -54,12 +54,12 @@ void TestSequencePadding(const paddle::framework::LoD& lod, static_cast(sequence_width)}); padding.mutable_data(padding_dims, *place); paddle::operators::math::PaddingLoDTensorFunctor()( - *context, seq, padding, false); + *context, seq, &padding, false); seq_back.set_lod(lod); seq_back.mutable_data(seq_dims, *place); paddle::operators::math::UnpaddingLoDTensorFunctor()( - *context, seq_back, padding, false); + *context, &seq_back, padding, false); if (paddle::platform::is_cpu_place(*place)) { cpu_seq_back = seq_back; diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index 85131d0025..705cc894c0 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -162,7 +162,7 @@ class WarpCTCKernel : public framework::OpKernel { static_cast(sequence_width)}); warpctc_logits.mutable_data(warpctc_logits_dims, ctx.GetPlace()); math::PaddingLoDTensorFunctor()( - ctx.template device_context(), *logits, warpctc_logits, + ctx.template device_context(), *logits, &warpctc_logits, false); const T* warpctc_logits_data = warpctc_logits.data(); @@ -217,7 +217,7 @@ class WarpCTCGradKernel : public framework::OpKernel { logits_grad->mutable_data(ctx.GetPlace()); bool norm_by_times = ctx.Attr("norm_by_times"); math::UnpaddingLoDTensorFunctor()( - ctx.template device_context(), *logits_grad, + ctx.template device_context(), logits_grad, *warpctc_grad, norm_by_times); const T* loss_grad_data = loss_grad->data(); From e7ac709b4bf1b1ef63bc13e63d7122e8bdbf07d9 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 2 May 2018 15:37:11 +0800 Subject: [PATCH 37/50] done --- paddle/fluid/operators/detail/grpc_server.cc | 4 +- paddle/fluid/operators/detail/grpc_server.h | 4 +- paddle/fluid/operators/listen_and_serv_op.cc | 47 ++++++++------------ paddle/fluid/operators/listen_and_serv_op.h | 16 ++++--- paddle/fluid/operators/send_recv_op_test.cc | 13 +++++- 5 files changed, 44 insertions(+), 40 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index ee3b3e3ccb..bb9c93480d 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -208,9 +208,9 @@ void AsyncGRPCServer::WaitClientGet(int count) { } } -bool AsyncGRPCServer::WaitServerReady() { +void AsyncGRPCServer::WaitServerReady() { std::unique_lock lock(this->mutex_ready_); - condition_ready_.wait(lock, [&] { return this->ready_ == 1; }); + condition_ready_.wait(lock, [=] { return this->ready_ == 1; }); } void AsyncGRPCServer::RunSyncUpdate() { diff --git 
a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index d7c06fc181..7f9cae21cc 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -47,7 +47,7 @@ class AsyncGRPCServer final { explicit AsyncGRPCServer(const std::string &address, bool sync_mode) : address_(address), sync_mode_(sync_mode), ready_(0) {} - bool WaitServerReady(); + void WaitServerReady(); void RunSyncUpdate(); // functions to sync server barrier status. @@ -120,7 +120,7 @@ class AsyncGRPCServer final { framework::Executor *executor_; int selected_port_; - std::mutext mutex_ready_; + std::mutex mutex_ready_; std::condition_variable condition_ready_; int ready_; }; diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 0a4b6a08e5..350c9c8563 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -80,12 +80,7 @@ static void ParallelExecuteBlocks( for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } -static void SavePort(std::shared_ptr rpc_service) { - std::ofstream port_file; - port_file.open("/tmp/paddle.selected_port"); - port_file << rpc_service->GetSelectedPort(); - port_file.close(); -} +std::atomic_int ListenAndServOp::selected_port_{0}; ListenAndServOp::ListenAndServOp(const std::string &type, const framework::VariableNameMap &inputs, @@ -93,15 +88,27 @@ ListenAndServOp::ListenAndServOp(const std::string &type, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} -int ListenAndServOp::GetSelectedPort() const { - return rpc_service_->GetSelectedPort(); -} - void ListenAndServOp::Stop() { rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); server_thread_->join(); } +void ListenAndServOp::SavePort(const std::string &file_path) const { + // NOTE: default write file to /tmp/paddle.selected_port + selected_port_ = rpc_service_->GetSelectedPort(); + + std::ofstream port_file; + port_file.open(file_path); + port_file << selected_port_.load(); + port_file.close(); + VLOG(4) << "selected port written to " << file_path; +} + +void ListenAndServOp::WaitServerReady() { + while (selected_port_.load() == 0) { + } +} + void ListenAndServOp::RunSyncLoop(framework::Executor *executor, framework::ProgramDesc *program, framework::Scope *recv_scope, @@ -265,23 +272,6 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, } // while(true) } -void ListenAndServOp::StartServerThread() { - server_thread_.reset(new std::thread( - std::bind(&ListenAndServOp::ServerThreadEntry, this, rpc_service_))); -} - -void ListenAndServOp::ServerThreadEntry( - std::shared_ptr service) { - service->RunSyncUpdate(); - VLOG(4) << "RunServer thread end"; - - { - std::lock_guard lock(this->barrier_mutex_); - barrier_cond_step_ = cond; - } - barrier_condition_.notify_all(); -} - void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); @@ -315,9 +305,10 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, // start the server listening after all member initialized. server_thread_.reset(new std::thread(RunServer, rpc_service_)); VLOG(3) << "wait server thread to become ready..."; + rpc_service_->WaitServerReady(); // Write to a file of server selected port for python use. 
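// For orientation, the WaitServerReady()/condition_ready_ members above
// follow the standard condition-variable handshake; a self-contained sketch
// of the same pattern (illustrative names, not part of this patch):
#include <condition_variable>
#include <mutex>

std::mutex mutex_ready;
std::condition_variable condition_ready;
int ready = 0;

void WaitServerReady() {  // blocks callers until the server signals ready
  std::unique_lock<std::mutex> lock(mutex_ready);
  condition_ready.wait(lock, [&] { return ready == 1; });
}

void SetServerReady() {  // called once the gRPC server starts serving
  {
    std::lock_guard<std::mutex> guard(mutex_ready);
    ready = 1;
  }
  condition_ready.notify_all();
}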
- SavePort(rpc_service_); + SavePort(); if (sync_mode) { RunSyncLoop(&executor, program, &recv_scope, prefetch_block); } else { diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index c85569acdc..87c0df2a8a 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include #include #include @@ -39,8 +40,6 @@ class ListenAndServOp : public framework::OperatorBase { const framework::VariableNameMap& outputs, const framework::AttributeMap& attrs); - int GetSelectedPort() const; - void RunSyncLoop(framework::Executor* executor, framework::ProgramDesc* program, framework::Scope* recv_scope, @@ -51,20 +50,25 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; - void StartServerThread(); + void SavePort( + const std::string& file_path = "/tmp/paddle.selected_port") const; + + void WaitServerReady(); - void ServerThreadEntry(); + int GetSelectedPort() { return selected_port_; } void Stop() override; void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override; + static void ResetPort() { selected_port_ = 0; } + protected: mutable std::shared_ptr rpc_service_; mutable std::shared_ptr server_thread_; - std::mutext server_ready_mutex_; - std::condition_variable server_ready_; + // FIXME(wuyi): it's static so that the operator can be cloned. + static std::atomic_int selected_port_; }; } // namespace operators diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index d2e1f3cb2f..a0b5a390db 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -116,6 +116,7 @@ void AddOp(const std::string &type, const f::VariableNameMap &inputs, void StartServerNet(bool is_sparse) { f::Scope scope; p::CPUPlace place; + VLOG(4) << "before init tensor"; if (is_sparse) { InitSelectedRowsInScope(place, &scope); } else { @@ -129,6 +130,7 @@ void StartServerNet(bool is_sparse) { auto *prefetch_block = program.AppendBlock(root_block); // X for server side tensors, RX for received tensors, must be of same shape. 
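// For orientation, the pieces above combine into the test-side startup and
// shutdown sequence below (a sketch assembled from this patch; it assumes
// listen_and_serv_op has been created through OpRegistry as in the test):
std::thread server_thread(StartServerNet, false /*is_sparse*/);
auto *srv = static_cast<paddle::operators::ListenAndServOp *>(
    listen_and_serv_op.get());
srv->WaitServerReady();             // spins until SavePort() stored the port
int port = srv->GetSelectedPort();  // guaranteed non-zero after the wait
// ... run the client side against 127.0.0.1:port ...
srv->Stop();
server_thread.join();
paddle::operators::ListenAndServOp::ResetPort();  // selected_port_ is static;
                                                  // reset it for the next test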
AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); + VLOG(4) << "before attr"; f::AttributeMap attrs; attrs.insert({"endpoint", std::string("127.0.0.1:0")}); @@ -139,15 +141,19 @@ void StartServerNet(bool is_sparse) { attrs.insert({"PrefetchBlock", prefetch_block}); attrs.insert({"grad_to_block_id", std::vector({""})}); attrs.insert({"sync_mode", true}); + VLOG(4) << "before init op"; listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); + VLOG(4) << "before run op"; listen_and_serv_op->Run(scope, place); LOG(INFO) << "server exit"; } TEST(SendRecvOp, CPUDense) { std::thread server_thread(StartServerNet, false); - sleep(5); // wait server to start + // wait server to start + static_cast(listen_and_serv_op.get()) + ->WaitServerReady(); // local net f::Scope scope; p::CPUPlace place; @@ -181,11 +187,13 @@ TEST(SendRecvOp, CPUDense) { listen_and_serv_op->Stop(); server_thread.join(); listen_and_serv_op.reset(nullptr); + paddle::operators::ListenAndServOp::ResetPort(); } TEST(SendRecvOp, CPUSparse) { std::thread server_thread(StartServerNet, true); - sleep(3); // wait server to start + static_cast(listen_and_serv_op.get()) + ->WaitServerReady(); // local net f::Scope scope; p::CPUPlace place; @@ -226,4 +234,5 @@ TEST(SendRecvOp, CPUSparse) { listen_and_serv_op->Stop(); server_thread.join(); listen_and_serv_op.reset(); + paddle::operators::ListenAndServOp::ResetPort(); } From 3d846fc3f28f136b52a0dd39fc608ac5bc9b0ad4 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 2 May 2018 15:45:08 +0800 Subject: [PATCH 38/50] Make Variable support for future.division. --- python/paddle/fluid/layers/math_op_patch.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py index 08a0184c2c..1754061c4b 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/fluid/layers/math_op_patch.py @@ -169,7 +169,9 @@ def monkey_patch_variable(): # a*b == b*a. 
Do not need to reverse explicitly ("__rmul__", "elementwise_mul", False), ("__div__", "elementwise_div", False), + ("__truediv__", "elementwise_div", False), ("__rdiv__", "elementwise_div", True), + ("__rtruediv__", "elementwise_div", True), ("__pow__", "elementwise_pow", False), ("__rpow__", "elementwise_pow", True), # for logical compare From da960ada49694b09e2f74cfeec4774c394ce066f Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Wed, 2 May 2018 16:14:25 +0800 Subject: [PATCH 39/50] redefine distribute transpiler api --- benchmark/cluster/vgg16/vgg16_fluid.py | 2 -- python/paddle/fluid/distribute_transpiler.py | 31 ++++++++++++++----- .../fluid/tests/book/test_fit_a_line.py | 7 +---- .../tests/book/test_image_classification.py | 7 +---- .../tests/book/test_label_semantic_roles.py | 7 +---- .../tests/book/test_machine_translation.py | 7 +---- .../fluid/tests/book/test_recognize_digits.py | 7 +---- .../tests/book/test_recommender_system.py | 7 +---- .../tests/book/test_understand_sentiment.py | 7 +---- .../paddle/fluid/tests/book/test_word2vec.py | 7 +---- 10 files changed, 31 insertions(+), 58 deletions(-) diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py index 8b29227cfa..7e0b887544 100644 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ b/benchmark/cluster/vgg16/vgg16_fluid.py @@ -239,8 +239,6 @@ def main(): t = fluid.DistributeTranspiler() t.transpile( - optimize_ops, - params_grads, trainer_id=args.task_index, pservers=args.ps_hosts, trainers=trainers) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index e63411782a..079d90f585 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -137,8 +137,6 @@ def split_dense_variable(var_list, class DistributeTranspiler: def transpile(self, - optimize_ops, - params_grads, trainer_id, program=None, pservers="127.0.0.1:6174", @@ -169,11 +167,6 @@ class DistributeTranspiler: 4. append ops that should run on current server instance. 5. add listen_and_serv op - :param optimize_ops: op list of optimization, should be the - return value of Optimizer.minimize - :type optimize_ops: list - :param params_grads: list of tuple(weight, gradient) - :type params_grads: list :param trainer_id: one unique id for each trainer in a job. :type trainer_id: int :param program: program to transpile, default is default_main_program @@ -194,7 +187,6 @@ class DistributeTranspiler: program = default_main_program() self.origin_program = program self.trainer_num = trainers - self.optimize_ops = optimize_ops self.sync_mode = sync_mode # TODO(typhoonzero): currently trainer_id is fetched from cluster system # like Kubernetes, we should port this to use etcd later when developing @@ -202,6 +194,7 @@ class DistributeTranspiler: self.trainer_id = trainer_id pserver_endpoints = pservers.split(",") self.pserver_endpoints = pserver_endpoints + self.optimize_ops, params_grads = self._get_optimize_pass() # process lookup_table_op # 1. 
check all lookup_table_op is distributed @@ -1147,3 +1140,25 @@ class DistributeTranspiler: # we only need to append op for once break return lr_ops + + def _get_optimize_pass(self): + block = self.origin_program.global_block() + opt_ops = [] + params_grads = [] + for op in block.ops: + if self._is_opt_op(op): + opt_ops.append(op) + params_grads.append((self.origin_program.global_block().var( + op.input("Param")[0]), + self.origin_program.global_block().var( + op.input("Grad")[0]))) + elif op.type == "scale": + # for adam optimize op + for in_name in op.input_arg_names: + if in_name.startswith("beta1_pow_acc") or \ + in_name.startswith("beta2_pow_acc"): + opt_ops.append(op) + break + else: + pass + return opt_ops, params_grads diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index 6dfc2997ae..ecb34699af 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -80,12 +80,7 @@ def train(use_cuda, save_dirname, is_local): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index 09f994c370..8ff4f6d47a 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -189,12 +189,7 @@ def train(net_type, use_cuda, save_dirname, is_local): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index d9cd76952e..50ef29c457 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -259,12 +259,7 @@ def train(use_cuda, save_dirname=None, is_local=True): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index 830d78df8b..46c6b9c29a 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -231,12 +231,7 @@ 
def train_main(use_cuda, is_sparse, is_local=True): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index 5ec6890c1b..c115aa4d7d 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -162,12 +162,7 @@ def train(nn_type, trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index 2172c275b8..d022dedbff 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -261,12 +261,7 @@ def train(use_cuda, save_dirname, is_local=True): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_understand_sentiment.py b/python/paddle/fluid/tests/book/test_understand_sentiment.py index dedd153778..241778e303 100644 --- a/python/paddle/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/test_understand_sentiment.py @@ -213,12 +213,7 @@ def train(word_dict, trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - trainers=trainers) + t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 8929779de9..6dec0f6857 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -145,12 +145,7 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) training_role = os.getenv("TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() - t.transpile( - optimize_ops, - params_grads, - trainer_id, - pservers=pserver_endpoints, - 
trainers=trainers) if training_role == "PSERVER": pserver_prog = t.get_pserver_program(current_endpoint) pserver_startup = t.get_startup_program(current_endpoint, From 1e28ba763994ebdda22004dcc293b5a9e3a540af Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 2 May 2018 08:25:32 +0000 Subject: [PATCH 40/50] follow comments --- paddle/cuda/include/hl_base.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 9bddf830bb..402302a5bf 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -206,8 +206,8 @@ typedef struct { #ifdef __NVCC__ -#include "./cuda_runtime.h" -#include "./hl_cuda.h" +#include <cuda_runtime.h> +#include "paddle/cuda/include/hl_cuda.h" #include "paddle/utils/Logging.h" extern __thread bool g_sync_flag; From 57be5c6c743408c010875cde372d17df529c71a0 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 2 May 2018 17:00:01 +0800 Subject: [PATCH 41/50] "fix double type error" (#10322) * "fix double type error" * "fix ci" --- paddle/fluid/operators/batch_norm_op.cc | 15 ++++++++++----- paddle/fluid/operators/batch_norm_op.cu.cc | 4 +++- paddle/fluid/operators/mul_op.cc | 6 ++++-- paddle/fluid/operators/mul_op.cu.cc | 4 +++- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index c9939e8602..f8b2505ccf 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -87,9 +87,13 @@ class BatchNormOp : public framework::OperatorWithKernel { const framework::ExecutionContext &ctx) const override { auto input_data_type = framework::ToDataType(ctx.Input<Tensor>("X")->type()); - // For float or float16 input tensor, the type of the scale, bias, mean, - // and var tensors should both be float. + // By default, the type of the scale, bias, mean, + // and var tensors should both be float. (For float or float16 input tensor) + // or double (For double input tensor). 
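// For reference, the kernel registrations this patch touches follow the
// usual one-entry-per-dtype convention; spelled out with the device-context
// and dtype template arguments (abbreviated elsewhere in this patch), the
// CPU registration reads roughly:
REGISTER_OP_CPU_KERNEL(
    batch_norm,
    ops::BatchNormKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BatchNormKernel<paddle::platform::CPUDeviceContext, double>);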
auto bn_param_type = framework::proto::VarType::FP32; + if (input_data_type == framework::proto::VarType::FP64) { + bn_param_type = framework::proto::VarType::FP64; + } PADDLE_ENFORCE_EQ(bn_param_type, framework::ToDataType(ctx.Input("Scale")->type()), "Scale input should be of float type"); @@ -492,8 +496,9 @@ REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker, REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp); REGISTER_OP_CPU_KERNEL( - batch_norm, - ops::BatchNormKernel); + batch_norm, ops::BatchNormKernel, + ops::BatchNormKernel); REGISTER_OP_CPU_KERNEL( batch_norm_grad, - ops::BatchNormGradKernel); + ops::BatchNormGradKernel, + ops::BatchNormGradKernel); diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index cb1927bc0f..550dd32d36 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -287,6 +287,8 @@ namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel, + ops::BatchNormKernel, ops::BatchNormKernel); REGISTER_OP_CUDA_KERNEL( - batch_norm_grad, ops::BatchNormGradKernel); + batch_norm_grad, ops::BatchNormGradKernel, + ops::BatchNormGradKernel); diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index c9fabc8d48..6903cf83b4 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -204,6 +204,8 @@ REGISTER_OPERATOR(mul, ops::MulOp, ops::MulOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(mul_grad, ops::MulGradOp); REGISTER_OP_CPU_KERNEL( - mul, ops::MulKernel); + mul, ops::MulKernel, + ops::MulKernel); REGISTER_OP_CPU_KERNEL( - mul_grad, ops::MulGradKernel); + mul_grad, ops::MulGradKernel, + ops::MulGradKernel); diff --git a/paddle/fluid/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc index 757f9c3ee2..81f3e42bf4 100644 --- a/paddle/fluid/operators/mul_op.cu.cc +++ b/paddle/fluid/operators/mul_op.cu.cc @@ -18,6 +18,8 @@ limitations under the License. */ namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(mul, ops::MulKernel, + ops::MulKernel, ops::MulKernel); REGISTER_OP_CUDA_KERNEL(mul_grad, - ops::MulGradKernel); + ops::MulGradKernel, + ops::MulGradKernel); From 1bb579a3f5f8b077faa32ce13ae34617f6d04e3d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 2 May 2018 17:33:26 +0800 Subject: [PATCH 42/50] A naive trainer implementation --- python/paddle/fluid/__init__.py | 3 +- python/paddle/fluid/layers/io.py | 6 +- python/paddle/fluid/optimizer.py | 3 +- .../book/word2vec/no_test_word2vec_new_api.py | 12 +- python/paddle/fluid/trainer.py | 188 ++++++++++++++++-- 5 files changed, 185 insertions(+), 27 deletions(-) diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 1e6482e3c1..bd325bd257 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -21,8 +21,7 @@ import executor from executor import * import trainer -from trainer import Trainer -from trainer import Event +from trainer import * import inferencer from inferencer import Inferencer diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index cc71c2136a..a5570b653e 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -50,8 +50,6 @@ def data(name, dtype(int|float): The type of data : float32, float_16, int etc type(VarType): The output type. By default it is LOD_TENSOR. 
lod_level(int): The LoD Level. 0 means the input data is not a sequence. - main_program(Program): Name of the main program that calls this - startup_program(Program): Name of the startup program stop_gradient(bool): A boolean that mentions whether gradient should flow. Returns: @@ -74,13 +72,15 @@ def data(name, if append_batch_size: shape = [-1] + shape # append batch size as -1 - return helper.create_global_variable( + data_var = helper.create_global_variable( name=name, shape=shape, dtype=dtype, type=type, stop_gradient=stop_gradient, lod_level=lod_level) + data_var.is_data = True + return data_var class BlockGuardServ(BlockGuard): diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 9ae43b3e93..0a314ddfd7 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -28,7 +28,8 @@ from contextlib import contextmanager __all__ = [ 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer', - 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'Adadelta', 'ModelAverage' + 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'Adadelta', 'ModelAverage', + 'Optimizer' ] diff --git a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py index 272db7b573..30939cae29 100644 --- a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py @@ -79,9 +79,9 @@ def inference_network(is_sparse): return predict_word -def train_network(): +def train_network(is_sparse): next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') - predict_word = inference_network() + predict_word = inference_network(is_sparse) cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) avg_cost = fluid.layers.mean(cost) return avg_cost @@ -94,7 +94,8 @@ def train(use_cuda, is_sparse, save_path): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() def event_handler(event): - if isinstance(event, fluid.Event.END_EPOCH): + print type(event) + if isinstance(event, fluid.EndEpochEvent): avg_cost = trainer.test(reader=paddle.dataset.imikolov.test( word_dict, N)) @@ -105,10 +106,11 @@ def train(use_cuda, is_sparse, save_path): sys.exit("got NaN loss, training failed.") trainer = fluid.Trainer( - partial(inference_network, is_sparse), + partial(train_network, is_sparse), fluid.optimizer.SGD(learning_rate=0.001), place=place) - trainer.train(train_reader, 100, event_handler) + trainer.train( + reader=train_reader, num_epochs=100, event_handler=event_handler) def infer(use_cuda, save_path): diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index aeda676502..2362da370a 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -12,44 +12,200 @@ # See the License for the specific language governing permissions and # limitations under the License. +import core +import framework +import executor +import data_feeder +import contextlib + +# optimizer is same as the parameter of Trainer.__init__. 
Rename it to opt_module +import optimizer as opt_module + __all__ = [ - 'Event', 'Trainer', + 'BeginEpochEvent', + 'EndEpochEvent', + 'BeginStepEvent', + 'EndStepEvent', ] -class Event(object): - BEGIN_EPOCH = 0 - END_EPOCH = 1 - BEGIN_STEP = 2 - END_STEP = 3 +class BeginEpochEvent(object): + def __init__(self, epoch_id): + self.epoch = epoch_id + + +class EndEpochEvent(object): + def __init__(self, epoch_id): + self.epoch = epoch_id - def __init__(self): - self.step = 0 - self.epoch = 0 - self.type = Event.BEGIN_EPOCH + +class BeginStepEvent(object): + def __init__(self, epoch_id, step_id): + self.epoch = epoch_id + self.step = step_id + + +class EndStepEvent(object): + def __init__(self, epoch_id, step_id): + self.epoch = epoch_id + self.step = step_id class Trainer(object): + """ + + Args: + network_func(callable): A function which will return loss. The loss must be a scaler. + optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer + params: + place: The device place of this trainer. + """ + def __init__(self, network_func, optimizer, params=None, place=None): # 1. we need to generate a framework.Program by calling # network_func. Reference: fluid.program_guard in # test_word2vec.py + self.scope = self._get_scope_from_params(params) + + self.startup_program = framework.Program() + self.train_program = framework.Program() + + with framework.program_guard(self.train_program, self.startup_program): + loss = network_func() + if not isinstance(optimizer, opt_module.Optimizer): + raise TypeError( + "The optimizer should be an instance of Optimizer") + + optimizer.minimize(loss) + + self.place = Trainer._check_and_get_place(place) # 2. move the default_main_program to self.program and run the # default_startup program on an empty core.Scope() + # Run startup program + if params is None: + exe = executor.Executor(place) + exe.run(self.startup_program, scope=self.scope) # 3. call self.params.add_vars with the initialized scope, it # will add the new vars of the initialized scope into # self.params. - self.network_func = network_func - self.optimizer = optimizer - self.params = params - self.place = place + # TODO(yuyang): This depends on parameters implementation. + # TODO(helin): support distributed training - def train(self, reader, num_epochs, event_handler): - pass + def train(self, + num_epochs, + event_handler, + reader=None, + parallel=False, + feed_order=None): + """ + Train the model. + + Args: + num_epochs: The number of epoch. An epoch will process all data in reader + event_handler: The event handler. A function with type (ev:Event)->void + reader: + parallel: True if use multi-CPUs or multi-GPUs + feed_order: Feeding order of reader. None will following the defining + order in program + + Returns: + + """ + if parallel: + raise NotImplementedError( + "Parallel Executor version of trainer is not implemented") + + self._train_by_executor(num_epochs, event_handler, reader, feed_order) def test(self, reader): pass + + def _get_scope_from_params(self, params): + """ + Get Scope from parameter object. + Args: + params(Parameter|None): The parameter object instance. Could be None. + + Returns: New scope if params is None. Or params.scope() + NOTE: This method is WIP. Not fully implemented. 
+ """ + if params is None: + return core.Scope() # new scope when params is None + else: + raise NotImplementedError("Not implemented right now.") + + @staticmethod + def _check_and_get_place(place): + """ + Check the type of place or get the default place + Args: + place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on. + + Raises: + TypeError if the type mismatched. + + Returns: + the original place if it is not None. + if fluid is compiled with CUDA, returns CUDAPlace(0) by default. + Otherwise returns CPUPlace by default. + """ + if place is None: + if core.is_compiled_with_cuda(): + return core.CUDAPlace(0) + else: + return core.CPUPlace() + else: + if not isinstance(place, core.CUDAPlace) and not isinstance( + place, core.CPUPlace): + raise TypeError("Place should be either CUDAPlace or CPUPlace") + return place + + @contextlib.contextmanager + def _prog_and_scope_guard(self): + with framework.program_guard( + main_program=self.train_program, + startup_program=self.startup_program): + with executor.scope_guard(self.scope): + yield + + def _train_by_executor(self, num_epochs, event_handler, reader, feed_order): + """ + Train by Executor and single device. + + Args: + num_epochs: + event_handler: + reader: + feed_order: + + Returns: + + """ + with self._prog_and_scope_guard(): + exe = executor.Executor(self.place) + if feed_order is None: + feed_var_list = [ + var + for var in self.train_program.global_block( + ).vars.itervalues() + if hasattr(var, 'is_data') and var.is_data + ] + else: + feed_var_list = [ + self.train_program.global_block().var(var_name) + for var_name in feed_order + ] + + feeder = data_feeder.DataFeeder( + feed_list=feed_var_list, place=self.place) + for epoch_id in range(num_epochs): + event_handler(BeginEpochEvent(epoch_id)) + for step_id, data in enumerate(reader()): + event_handler(BeginStepEvent(epoch_id, step_id)) + exe.run(feed=feeder.feed(data), fetch_list=[]) + event_handler(EndStepEvent(epoch_id, step_id)) + event_handler(EndEpochEvent(epoch_id)) From 5e151b2c83f70900a47431d27aa33687b407ddd4 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 2 May 2018 17:47:11 +0800 Subject: [PATCH 43/50] Follow comment --- paddle/fluid/operators/cross_entropy_op.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h index 822a83712d..19a2aec92b 100644 --- a/paddle/fluid/operators/cross_entropy_op.h +++ b/paddle/fluid/operators/cross_entropy_op.h @@ -72,13 +72,13 @@ class XeGradFunctor { size_t num_classes) : dx_(dx), dy_(dy), x_(x), label_(label), num_classes_(num_classes) {} - HOSTDEVICE void operator()(size_t label_id) { - auto x_is_true_offset = label_id * num_classes_ + label_[label_id]; - for (size_t x_offset = label_id * num_classes_; - x_offset < (label_id + 1) * num_classes_; ++x_offset) { + HOSTDEVICE void operator()(size_t sample_id) { + auto x_is_true_offset = sample_id * num_classes_ + label_[sample_id]; + for (size_t x_offset = sample_id * num_classes_; + x_offset < (sample_id + 1) * num_classes_; ++x_offset) { dx_[x_offset] = x_offset != x_is_true_offset ? 
static_cast<T>(0) - : -dy_[label_id] / x_[x_offset]; + : -dy_[sample_id] / x_[x_offset]; } } From 6422c0e4f6814536ba7772e431858c84840d417b Mon Sep 17 00:00:00 2001 From: "yi.wu" Date: Wed, 2 May 2018 23:12:18 +0800 Subject: [PATCH 44/50] update by comment --- paddle/fluid/operators/listen_and_serv_op.cc | 5 ++++- paddle/fluid/operators/send_recv_op_test.cc | 11 +++++------ .../paddle/fluid/tests/unittests/test_dist_train.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 350c9c8563..038a2aa1f1 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -308,7 +308,10 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, rpc_service_->WaitServerReady(); // Write to a file of server selected port for python use. - SavePort(); + std::string file_path = + string::Sprintf("/tmp/paddle.%d.selected_port", + static_cast<int>(::getpid())); + SavePort(file_path); if (sync_mode) { RunSyncLoop(&executor, program, &recv_scope, prefetch_block); } else { diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 0d495d8d15..eb51f301bf 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -198,8 +198,11 @@ TEST(SendRecvOp, CPUSparse) { std::thread server_thread(StartServerNet, true, &initialized); while (!initialized) { } - static_cast<paddle::operators::ListenAndServOp *>(listen_and_serv_op.get()) - ->WaitServerReady(); + auto *listen_and_serv_op_ptr = + static_cast<paddle::operators::ListenAndServOp *>( + listen_and_serv_op.get()); + ASSERT_TRUE(listen_and_serv_op_ptr != nullptr); + listen_and_serv_op_ptr->WaitServerReady(); // local net f::Scope scope; @@ -208,10 +211,6 @@ InitSelectedRowsInScope(place, &scope); scope.Var("RPC_CLIENT_VAR"); f::AttributeMap attrs; - auto *listen_and_serv_op_ptr = - static_cast<paddle::operators::ListenAndServOp *>( - listen_and_serv_op.get()); - ASSERT_TRUE(listen_and_serv_op_ptr != nullptr); selected_port = listen_and_serv_op_ptr->GetSelectedPort(); std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector<std::string>({endpoint})}); diff --git a/python/paddle/fluid/tests/unittests/test_dist_train.py b/python/paddle/fluid/tests/unittests/test_dist_train.py index c7fdd06f10..77e9a8f7e7 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_train.py @@ -34,7 +34,7 @@ class TestSendOp(unittest.TestCase): p.start() time.sleep(10) - with open("/tmp/paddle.selected_port", "r") as fn: + with open("/tmp/paddle.%d.selected_port" % p.pid, "r") as fn: selected_port = int(fn.readlines()[0]) self.init_client(place, selected_port) From 753ea15d26576103e2592b500a2d443246408f54 Mon Sep 17 00:00:00 2001 From: Lei Wang Date: Tue, 1 May 2018 19:11:06 -0700 Subject: [PATCH 45/50] Build: add cicheck task. 
--- paddle/scripts/paddle_build.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 53455fd860..1595cc9e8a 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -40,6 +40,7 @@ function print_usage() { ${BLUE}capi${NONE}: generate paddle CAPI package ${BLUE}fluid_inference_lib${NONE}: deploy fluid inference library ${BLUE}check_style${NONE}: run code style check + ${BLUE}cicheck${NONE}: run CI tasks " } @@ -453,6 +454,8 @@ function gen_capi_package() { } function gen_fluid_inference_lib() { + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build if [ ${WITH_C_API:-OFF} == "OFF" ] ; then cat < Date: Thu, 3 May 2018 09:28:35 +0800 Subject: [PATCH 46/50] Fix __shfl_down_sync_ of cross_entropy (#10345) * fix __shfl_down_sync_ of cross_entropy * use reduceSum * "fix ci" --- .../fluid/operators/elementwise_op_function.h | 42 +---------- paddle/fluid/operators/math/cross_entropy.cu | 65 +++------------- paddle/fluid/operators/row_conv_op.cu | 2 +- paddle/fluid/platform/cuda_device_function.h | 74 +++++++++++++++++++ paddle/fluid/platform/cuda_primitives.h | 13 ---- 5 files changed, 88 insertions(+), 108 deletions(-) create mode 100644 paddle/fluid/platform/cuda_device_function.h diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 953aedc850..8b052611f8 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -22,6 +22,7 @@ limitations under the License. */ #ifdef __NVCC__ #include #include +#include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" constexpr int ELEMWISE_MAX_BLOCK_DIM = 1024; #endif @@ -336,43 +337,6 @@ static void ElemwiseGradBroadcast1CPU(const T* x, const T* y, const T* out, } #ifdef __NVCC__ - -template -__device__ T reduceSum(T val, int tid, int len) { - // NOTE(zcd): The warp size should be taken from the - // parameters of the GPU but not specified as 32 simply. - // To make the reduceSum more efficiently, - // I use Warp-Level Parallelism and assume the Warp size - // is 32 which may be different for different GPU, - // but most card's warp size is 32. - const int warpSize = 32; - __shared__ T shm[warpSize]; - unsigned mask = 0u; - CREATE_SHFL_MASK(mask, tid < len); - - for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += platform::__shfl_down_sync(mask, val, offset); - - if (tid < warpSize) shm[tid] = 0; - - __syncthreads(); - - if (tid % warpSize == 0) { - shm[tid / warpSize] = val; - } - __syncthreads(); - - CREATE_SHFL_MASK(mask, tid < warpSize); - - if (tid < warpSize) { - val = shm[tid]; - for (int offset = warpSize / 2; offset > 0; offset /= 2) - val += platform::__shfl_down_sync(mask, val, offset); - } - - return val; -} - template static __global__ void ElemwiseGradBroadcast1CUDAKernel( const T* x, const T* y, const T* out, const T* dout, int h, int w, @@ -395,7 +359,7 @@ static __global__ void ElemwiseGradBroadcast1CUDAKernel( if (dy) { h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h; - val = reduceSum(val, tid, h); + val = paddle::platform::reduceSum(val, tid, h); if (threadIdx.x == 0) { dy[j] = val; } @@ -472,7 +436,7 @@ static __global__ void ElemwiseGradBroadcast2CUDAKernel( if (dy) { int h = pre * post; h = h > ELEMWISE_MAX_BLOCK_DIM ? 
ELEMWISE_MAX_BLOCK_DIM : h; - val = reduceSum(val, tid, h); + val = paddle::platform::reduceSum(val, tid, h); if (threadIdx.x == 0) { dy[j] = val; } diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index 6d2ba2bd0d..0de58d5fdd 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cross_entropy.h" +#include "paddle/fluid/platform/cuda_device_function.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { @@ -30,66 +31,22 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label, } } -template -__device__ __forceinline__ T sum_single_warp(T val) { - val += platform::__shfl_down_sync(0, val, 16); - val += platform::__shfl_down_sync(0, val, 8); - val += platform::__shfl_down_sync(0, val, 4); - val += platform::__shfl_down_sync(0, val, 2); - val += platform::__shfl_down_sync(0, val, 1); - return val; -} - -// CUDA do not support dynamic arrary in template -// https://stackoverflow.com/questions/20497209 -template -struct SharedMemory { - // Ensure that we won't compile any un-specialized types - __device__ T* GetPointer() { return NULL; } -}; - -template <> -struct SharedMemory { - __device__ float* GetPointer() { - extern __shared__ float s_float[]; - return s_float; - } -}; - -template <> -struct SharedMemory { - __device__ double* GetPointer() { - extern __shared__ double s_double[]; - return s_double; - } -}; - template __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, const int class_num) { int tid = threadIdx.x; - SharedMemory d_sum_shared; - T* d_sum = d_sum_shared.GetPointer(); - d_sum[tid] = 0; + T val = 0; - int cur_idx = tid; - int next_idx = blockIdx.x * class_num + tid; - while (cur_idx < class_num) { - d_sum[tid] += - math::TolerableValue()(std::log(X[next_idx])) * label[next_idx]; - next_idx += blockDim.x; - cur_idx += blockDim.x; + int idx = blockIdx.x * class_num + tid; + int end = blockIdx.x * class_num + class_num; + for (; idx < end; idx += blockDim.x) { + val += math::TolerableValue()(std::log(X[idx])) * label[idx]; } - __syncthreads(); - for (unsigned int stride = blockDim.x >> 1; stride >= 32; stride >>= 1) { - if (tid < stride) d_sum[tid] += d_sum[tid + stride]; - __syncthreads(); + val = paddle::platform::reduceSum(val, tid, blockDim.x); + if (threadIdx.x == 0) { + Y[blockIdx.x] = -val; } - - T val = d_sum[tid]; - val = sum_single_warp(val); - if (tid == 0) Y[blockIdx.x] = -val; } } // namespace @@ -113,9 +70,7 @@ class CrossEntropyFunctor { ? 512 : pow(2, static_cast(std::log2(class_num))); - SoftCrossEntropyKernel<<< - batch_size, block, block * sizeof(T), - reinterpret_cast(ctx).stream()>>>( + SoftCrossEntropyKernel<<>>( loss_data, prob_data, label_data, class_num); } else { const int64_t* label_data = labels->data(); diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index dd8e62aca4..79d08cf3d1 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/row_conv_op.h" -#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/platform/cuda_device_function.h b/paddle/fluid/platform/cuda_device_function.h new file mode 100644 index 0000000000..7cfeaab35b --- /dev/null +++ b/paddle/fluid/platform/cuda_device_function.h @@ -0,0 +1,74 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include + +namespace paddle { +namespace platform { + +// __shfl_down and __shfl have been deprecated as of CUDA 9.0. +#if CUDA_VERSION < 9000 +template +__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { + return __shfl_down(val, delta); +} + +template +__forceinline__ __device__ T __shfl_sync(unsigned, T val, int src_line, + int width) { + return __shfl(val, src_line, width); +} +#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; +#else +#define FULL_WARP_MASK 0xFFFFFFFF +#define CREATE_SHFL_MASK(mask, predicate) \ + mask = __ballot_sync(FULL_WARP_MASK, (predicate)) +#endif + +template +__device__ T reduceSum(T val, int tid, int len) { + // NOTE(zcd): The warp size should be taken from the + // parameters of the GPU but not specified as 32 simply. + // To make the reduceSum more efficiently, + // I use Warp-Level Parallelism and assume the Warp size + // is 32 which may be different for different GPU, + // but most card's warp size is 32. + const int warpSize = 32; + __shared__ T shm[warpSize]; + unsigned mask = 0u; + CREATE_SHFL_MASK(mask, tid < len); + + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += platform::__shfl_down_sync(mask, val, offset); + + if (tid < warpSize) shm[tid] = 0; + + if (tid % warpSize == 0) { + shm[tid / warpSize] = val; + } + __syncthreads(); + + CREATE_SHFL_MASK(mask, tid < warpSize); + + if (tid < warpSize) { + val = shm[tid]; + for (int offset = warpSize / 2; offset > 0; offset /= 2) + val += platform::__shfl_down_sync(mask, val, offset); + } + return val; +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/cuda_primitives.h b/paddle/fluid/platform/cuda_primitives.h index 866ff30a8b..8758af0804 100644 --- a/paddle/fluid/platform/cuda_primitives.h +++ b/paddle/fluid/platform/cuda_primitives.h @@ -66,18 +66,5 @@ CUDA_ATOMIC_WRAPPER(Add, double) { } #endif -// __shfl_down has been deprecated as of CUDA 9.0. 
-#if CUDA_VERSION < 9000 -template -__forceinline__ __device__ T __shfl_down_sync(unsigned, T val, int delta) { - return __shfl_down(val, delta); -} -#define CREATE_SHFL_MASK(mask, predicate) mask = 0u; -#else -#define FULL_WARP_MASK 0xFFFFFFFF -#define CREATE_SHFL_MASK(mask, predicate) \ - mask = __ballot_sync(FULL_WARP_MASK, (predicate)) -#endif - } // namespace platform } // namespace paddle From 4a497b826da9f133ef873be5316f0cf12d280f55 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 3 May 2018 03:32:25 +0200 Subject: [PATCH 47/50] MKLDNN implementation of batch normalization (#9904) * Initial implementation of forward pass for MKLDNN batch norm * Added attributes for MKLDNN batch norm * MKLDNN batch norm forward pass passes unittest. Started working on backward * Backward pass for MKLDNN batch norm added * MKLDNN batch norm: scoring added to forward pass * MKLDNN batch norm: bias as input added; handling AnyLayout when kernel is looked up * MKLDNN batch norm: python unit tests added; mkldnn tests removed * MKLDNN batch norm: changes required by cpplint * MKLDNN batch norm: refactoring the operator * MKLDNN batch norm: saved variance inversed in backward pass for correct execution of MKLDNN unit tests * MKLDNN batch norm: refctoring, function for static/const cast to void* added * MKLDNN batch norm: remove AnyLayout from batch norm * MKLDNN batch norm: only NCHW format is supported. Unittests refactored * MKDNN batch norm: use_mkldnn added to attributes * MKLDNN batch norm: AnyLayout removed from unittest * MKLDNN batch norm: added CUDNN defines to batch norm * MKLDNN batch norm: undefined data_format variable corrected * MKLDNN batch norm: use_cudnn added, use of setUp method for configuring attributes * MKLDNN batch norm: added use_cudnn attribute to batch norm operator * MKLDNN batch norm: correcting batch norm unit tests for MKLDNN * MKLDNN batch norm: MKLDNN tests moved to another file; reverting changes for saved variance not being inverted * Change default layout to NCHW * MKLDNN batch norm: init_kernel_type method added to unit tests * MKLDNN batch norm: style changes * MKLDNN batch norm: unit tests refactored * MKLDNN batch norm: added use_mkldnn attribute to batch norm python interface --- .../fluid/operators/batch_norm_mkldnn_op.cc | 325 ++++++++++++++++++ paddle/fluid/operators/batch_norm_op.cc | 35 +- python/paddle/fluid/layers/nn.py | 10 +- .../unittests/test_batch_norm_mkldnn_op.py | 56 +++ .../tests/unittests/test_batch_norm_op.py | 49 ++- 5 files changed, 458 insertions(+), 17 deletions(-) create mode 100644 paddle/fluid/operators/batch_norm_mkldnn_op.cc create mode 100644 python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py diff --git a/paddle/fluid/operators/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/batch_norm_mkldnn_op.cc new file mode 100644 index 0000000000..0e4a56d4a4 --- /dev/null +++ b/paddle/fluid/operators/batch_norm_mkldnn_op.cc @@ -0,0 +1,325 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "mkldnn.hpp" +#include "paddle/fluid/operators/batch_norm_op.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; +using paddle::platform::MKLDNNMemDesc; +using mkldnn::memory; + +template +using EigenArrayMap = + Eigen::Map>; +template +using ConstEigenArrayMap = + Eigen::Map>; +template +using EigenVectorArrayMap = Eigen::Map>; +template +using ConstEigenVectorArrayMap = + Eigen::Map>; + +namespace { +template +struct bn_type_traits { + using op_type = T; + using op_desc = typename op_type::desc; + using op_prim = typename op_type::primitive_desc; +}; + +template +void copy_to_weights(T scale_begin, T scale_end, T shift_begin, T shift_end, + Container *c) { + auto it = std::begin(*c); + + std::copy(scale_begin, scale_end, std::inserter(*c, it)); + std::copy( + shift_begin, shift_end, + std::inserter(*c, std::next(it, std::distance(scale_begin, scale_end)))); +} + +template +void run_batch_norm_op(Args &&... args) { + Op batch_norm_op{args...}; + + std::vector pipeline; + pipeline.push_back(batch_norm_op); + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); +} + +template +inline void *cast_const_to_void(const T *t) { + return static_cast(const_cast(t)); +} +} // namespace + +template +class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + auto data_layout_str = ctx.Attr("data_layout"); + auto data_layout = framework::StringToDataLayout(data_layout_str); + PADDLE_ENFORCE(data_layout == framework::DataLayout::kNCHW, + "MKLDNN batch normalization handles only NCHW data layout"); + + const float epsilon = ctx.Attr("epsilon"); + const float momentum = ctx.Attr("momentum"); + const bool is_test = ctx.Attr("is_test"); + + const auto *x = ctx.Input("X"); + const auto *mean = ctx.Input("Mean"); + const auto *variance = ctx.Input("Variance"); + + auto &dev_ctx = ctx.template device_context(); + auto mkldnn_engine = dev_ctx.GetEngine(); + + auto *y = ctx.Output("Y"); + auto *mean_out = ctx.Output("MeanOut"); + auto *variance_out = ctx.Output("VarianceOut"); + auto *batch_mean = ctx.Output("SavedMean"); + auto *batch_variance = ctx.Output("SavedVariance"); + + const auto *scale = ctx.Input("Scale"); + const auto *shift = ctx.Input("Bias"); + + y->mutable_data(ctx.GetPlace()); + mean_out->mutable_data(ctx.GetPlace()); + variance_out->mutable_data(ctx.GetPlace()); + + if (!is_test) { + batch_mean->mutable_data(ctx.GetPlace()); + batch_variance->mutable_data(ctx.GetPlace()); + } + + auto propagation = is_test == true ? 
mkldnn::prop_kind::forward_scoring + : mkldnn::prop_kind::forward_training; + + auto dims = paddle::framework::vectorize2int(x->dims()); + + auto src_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + auto dst_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + + auto src_pd = mkldnn::memory::primitive_desc{src_md, mkldnn_engine}; + auto dst_pd = mkldnn::memory::primitive_desc{dst_md, mkldnn_engine}; + + auto src = mkldnn::memory{src_pd, cast_const_to_void(x->data())}; + auto dst = mkldnn::memory{dst_pd, y->data()}; + + unsigned flags = mkldnn::use_scale_shift; + if (is_test) flags |= mkldnn::use_global_stats; + + using bn_fwd_types = bn_type_traits; + auto batch_norm_fwd_desc = + bn_fwd_types::op_desc{propagation, src_md, epsilon, flags}; + auto batch_norm_fwd_pd = + bn_fwd_types::op_prim{batch_norm_fwd_desc, mkldnn_engine}; + + const unsigned int ic = dims[1]; + + // MKLDNN requires a single piece of memory for scale and shift/bias data + const size_t scaleshift_size = 2 * ic; + std::vector scaleshift_data; + scaleshift_data.reserve(scaleshift_size); + + copy_to_weights(scale->data(), scale->data() + ic, shift->data(), + shift->data() + ic, &scaleshift_data); + + auto scaleshift_memory = mkldnn::memory{ + batch_norm_fwd_pd.weights_primitive_desc(), scaleshift_data.data()}; + + if (is_test) { + auto mean_memory = mkldnn::memory{batch_norm_fwd_pd.mean_primitive_desc(), + cast_const_to_void(mean->data())}; + + auto variance_memory = + mkldnn::memory{batch_norm_fwd_pd.variance_primitive_desc(), + cast_const_to_void(variance->data())}; + + run_batch_norm_op( + batch_norm_fwd_pd, src, (const mkldnn::primitive::at &)mean_memory, + (const mkldnn::primitive::at &)variance_memory, scaleshift_memory, + dst); + } else { + auto mean_memory = + mkldnn::memory{batch_norm_fwd_pd.mean_primitive_desc(), + cast_const_to_void(batch_mean->data())}; + + auto variance_memory = + mkldnn::memory{batch_norm_fwd_pd.variance_primitive_desc(), + cast_const_to_void(batch_variance->data())}; + + run_batch_norm_op(batch_norm_fwd_pd, src, + scaleshift_memory, dst, + mean_memory, variance_memory); + } + + if (!is_test) { + const unsigned int in = dims[0]; + const unsigned int sample_size = x->numel() / in / ic; + + // saved_xx is use just in this batch of data + EigenVectorArrayMap saved_mean_e( + batch_mean->mutable_data(ctx.GetPlace()), ic); + EigenVectorArrayMap saved_variance_e( + batch_variance->mutable_data(ctx.GetPlace()), ic); + saved_mean_e.setZero(); + saved_variance_e.setZero(); + + const unsigned int x_arr_size = in * ic; + ConstEigenArrayMap x_arr(x->data(), sample_size, x_arr_size); + for (unsigned int nc = 0; nc < x_arr_size; ++nc) { + saved_mean_e(nc % ic) += x_arr.col(nc).sum(); + } + saved_mean_e /= in * sample_size; + for (unsigned int nc = 0; nc < x_arr_size; ++nc) { + saved_variance_e(nc % ic) += + (x_arr.col(nc) - saved_mean_e(nc % ic)).matrix().squaredNorm(); + } + saved_variance_e /= in * sample_size; + + ConstEigenVectorArrayMap mean_arr{mean->data(), ic}; + ConstEigenVectorArrayMap variance_arr{variance->data(), ic}; + + EigenVectorArrayMap running_mean_arr( + mean_out->mutable_data(ctx.GetPlace()), ic); + EigenVectorArrayMap running_var_arr( + variance_out->mutable_data(ctx.GetPlace()), ic); + + auto one_minus_momentum = 1. 
- momentum; + running_mean_arr = + mean_arr * momentum + saved_mean_e * one_minus_momentum; + running_var_arr = + variance_arr * momentum + saved_variance_e * one_minus_momentum; + } + } +}; + +template +class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext &ctx) const override { + auto data_layout_str = ctx.Attr("data_layout"); + auto data_layout = framework::StringToDataLayout(data_layout_str); + PADDLE_ENFORCE(data_layout == framework::DataLayout::kNCHW, + "MKLDNN batch normalization handles only NCHW data layout"); + + auto &dev_ctx = ctx.template device_context(); + auto mkldnn_engine = dev_ctx.GetEngine(); + + const float epsilon = ctx.Attr("epsilon"); + + const auto *x = ctx.Input("X"); + const auto *scale = ctx.Input("Scale"); + const auto *shift = ctx.Input("Bias"); + const auto *batch_mean = ctx.Input("SavedMean"); + const auto *batch_variance = ctx.Input("SavedVariance"); + + const auto *diff_y = ctx.Input(framework::GradVarName("Y")); + auto *diff_x = ctx.Output(framework::GradVarName("X")); + auto *diff_scale = ctx.Output(framework::GradVarName("Scale")); + auto *diff_shift = ctx.Output(framework::GradVarName("Bias")); + + diff_x->mutable_data(ctx.GetPlace()); + diff_scale->mutable_data(ctx.GetPlace()); + diff_shift->mutable_data(ctx.GetPlace()); + + auto dims = paddle::framework::vectorize2int(x->dims()); + unsigned flags = mkldnn::use_scale_shift | !mkldnn::use_global_stats; + + auto src_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + auto dst_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + auto diff_src_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + auto diff_dst_md = + MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + + using bn_bwd_types = bn_type_traits; + using bn_fwd_types = bn_type_traits; + + auto batch_norm_fwd_desc = bn_fwd_types::op_desc{ + mkldnn::prop_kind::forward_training, src_md, epsilon, flags}; + auto batch_norm_fwd_pd = + bn_fwd_types::op_prim{batch_norm_fwd_desc, mkldnn_engine}; + + auto batch_norm_bwd_desc = bn_bwd_types::op_desc{ + mkldnn::prop_kind::backward, diff_dst_md, dst_md, epsilon, flags}; + auto batch_norm_bwd_pd = bn_bwd_types::op_prim{ + batch_norm_bwd_desc, mkldnn_engine, batch_norm_fwd_pd}; + + auto src = mkldnn::memory{{src_md, mkldnn_engine}, + cast_const_to_void(x->data())}; + + auto mean = mkldnn::memory{batch_norm_bwd_pd.mean_primitive_desc(), + cast_const_to_void(batch_mean->data())}; + + auto variance = + mkldnn::memory{batch_norm_bwd_pd.variance_primitive_desc(), + cast_const_to_void(batch_variance->data())}; + + auto diff_dst = mkldnn::memory{{diff_dst_md, mkldnn_engine}, + cast_const_to_void(diff_y->data())}; + + const unsigned int ic = dims[1]; + + const size_t scaleshift_size = 2 * ic; + + std::vector scaleshift_data; + scaleshift_data.reserve(scaleshift_size); + copy_to_weights(scale->data(), scale->data() + ic, shift->data(), + shift->data() + ic, &scaleshift_data); + + auto scaleshift_memory = mkldnn::memory{ + batch_norm_bwd_pd.weights_primitive_desc(), scaleshift_data.data()}; + + std::vector diff_scaleshift_data; + diff_scaleshift_data.reserve(scaleshift_size); + copy_to_weights(diff_scale->data(), diff_scale->data() + ic, + diff_shift->data(), diff_shift->data() + ic, + &diff_scaleshift_data); + + auto diff_scaleshift_memory = + mkldnn::memory{batch_norm_bwd_pd.diff_weights_primitive_desc(), + diff_scaleshift_data.data()}; + + auto 
diff_src = mkldnn::memory{{diff_src_md, mkldnn_engine}, + static_cast(diff_x->data())}; + + run_batch_norm_op( + batch_norm_bwd_pd, src, mean, variance, diff_dst, scaleshift_memory, + diff_src, diff_scaleshift_memory); + + auto it = std::begin(diff_scaleshift_data); + std::copy(it, std::next(it, ic), diff_scale->data()); + std::copy(std::next(it, ic), std::end(diff_scaleshift_data), + diff_shift->data()); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_KERNEL(batch_norm, MKLDNN, paddle::platform::CPUPlace, + ops::BatchNormMKLDNNOpKernel); +REGISTER_OP_KERNEL(batch_norm_grad, MKLDNN, paddle::platform::CPUPlace, + ops::BatchNormMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index f8b2505ccf..b4bd40d031 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -15,6 +15,9 @@ limitations under the License. */ #include "paddle/fluid/operators/batch_norm_op.h" #include #include "paddle/fluid/framework/data_layout.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif namespace paddle { namespace operators { @@ -106,7 +109,18 @@ class BatchNormOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(bn_param_type, framework::ToDataType( ctx.Input("Variance")->type()), "Variance input should be of float type"); - return framework::OpKernelType(input_data_type, ctx.GetPlace()); + + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_MKLDNN + if (library_ == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kMKLDNN; + } +#endif + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, + library_); } }; @@ -151,6 +165,9 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { "Variance of the current mini batch, " "will apply to output when training") .AsIntermediate(); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddComment(R"DOC( Batch Normalization. 
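(Aside: every implementation registered for batch_norm, the MKLDNN kernel above included, must agree on the same per-channel transform, y = scale * (x - mean) / sqrt(var + epsilon) + bias, with the statistics taken over N, H and W for NCHW input. A minimal NumPy sketch of the forward pass, illustrative only, mirroring what _reference_training computes in the unit tests further below:)

import numpy as np

def batch_norm_nchw(x, scale, bias, epsilon=1e-5):
    # x has shape (N, C, H, W); statistics are computed per channel.
    mean = x.mean(axis=(0, 2, 3)).reshape(1, -1, 1, 1)
    var = x.var(axis=(0, 2, 3)).reshape(1, -1, 1, 1)
    x_hat = (x - mean) / np.sqrt(var + epsilon)
    y = scale.reshape(1, -1, 1, 1) * x_hat + bias.reshape(1, -1, 1, 1)
    return y, mean.ravel(), var.ravel()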
@@ -349,8 +366,19 @@ class BatchNormGradOp : public framework::OperatorWithKernel { if (t == nullptr) { PADDLE_THROW("can't find Y@GRAD"); } - return framework::OpKernelType(framework::ToDataType(t->type()), - ctx.GetPlace()); + + framework::LibraryType library_{framework::LibraryType::kPlain}; +#ifdef PADDLE_WITH_MKLDNN + if (library_ == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kMKLDNN; + } +#endif + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), + layout, library_); } }; @@ -474,6 +502,7 @@ class BatchNormGradMaker : public framework::SingleGradOpDescMaker { op->SetInput(framework::GradVarName("Y"), OutputGrad("Y")); op->SetInput("Scale", Input("Scale")); + op->SetInput("Bias", Input("Bias")); op->SetInput("SavedMean", Output("SavedMean")); op->SetInput("SavedVariance", Output("SavedVariance")); diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 7f16bf2a0c..93e8d0bf29 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1496,6 +1496,7 @@ def batch_norm(input, bias_attr=None, data_layout='NCHW', in_place=False, + use_mkldnn=False, name=None, moving_mean_name=None, moving_variance_name=None, @@ -1574,9 +1575,12 @@ def batch_norm(input, "SavedMean": saved_mean, "SavedVariance": saved_variance }, - attrs={"momentum": momentum, - "epsilon": epsilon, - "is_test": is_test}) + attrs={ + "momentum": momentum, + "epsilon": epsilon, + "is_test": is_test, + "use_mkldnn": use_mkldnn + }) return helper.append_activation(batch_norm_out) diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py new file mode 100644 index 0000000000..f6097d4b84 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py @@ -0,0 +1,56 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import paddle.fluid.core as core +from paddle.fluid.op import Operator +import paddle.fluid as fluid +from op_test import OpTest +from paddle.fluid.framework import grad_var_name +from test_batch_norm_op import TestBatchNormOpInference, TestBatchNormOpTraining, _reference_training, _reference_grad + + +class TestMKLDNNBatchNormOpTraining(TestBatchNormOpTraining): + def init_kernel_type(self): + self.use_mkldnn = True + self.data_formats = ["NCHW"] + + def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, + epsilon, momentum, shape, data_layout): + # run forward + y, saved_mean, saved_variance = _reference_training( + x, scale, bias, epsilon, data_layout) + mean_out = saved_mean * (1. - momentum) + momentum * mean + variance_out = saved_variance * (1. 
- momentum) + momentum * variance + # run backward + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, saved_variance, epsilon, data_layout) + + return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + + +class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference): + def init_kernel_type(self): + self.use_mkldnn = True + + def test_check_output(self): + place = core.CPUPlace() + data_format = "NCHW" + + self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5]) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index a0e78a4607..4216d83653 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -158,6 +158,8 @@ def set_output_grad(scope, outputs, place, feed_dict=None): class TestBatchNormOpInference(unittest.TestCase): def setUp(self): self.dtype = np.float32 + self.use_mkldnn = False + self.init_kernel_type() def __assert_close(self, tensor, np_array, msg, atol=1e-4): self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) @@ -230,6 +232,7 @@ class TestBatchNormOpInference(unittest.TestCase): # attrs is_test=True, data_layout=data_layout, + use_mkldnn=self.use_mkldnn, epsilon=epsilon) batch_norm_op.run(scope, place) @@ -254,10 +257,15 @@ class TestBatchNormOpInference(unittest.TestCase): [2, 3, 4, 5]) self.check_with_place(place, data_format, self.dtype, [2, 3]) + def init_kernel_type(self): + pass + class TestFP16BatchNormOpInference(TestBatchNormOpInference): def setUp(self): self.dtype = np.float16 + self.use_mkldnn = False + self.init_kernel_type() def test_check_output(self): places = [] @@ -274,9 +282,28 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): class TestBatchNormOpTraining(unittest.TestCase): + def setUp(self): + self.use_mkldnn = False + self.data_formats = ["NCHW", "NHWC"] + self.init_kernel_type() + def __assert_close(self, tensor, np_array, msg, atol=1e-4): np.allclose(np.array(tensor), np_array, atol=atol) + def ref_forward_backward(self, x, y_grad, scale, bias, mean, variance, + epsilon, momentum, shape, data_layout): + # run forward + y, saved_mean, var_ref = _reference_training(x, scale, bias, epsilon, + data_layout) + mean_out = saved_mean * (1. - momentum) + momentum * mean + variance_out = var_ref * (1. - momentum) + momentum * variance + saved_variance = 1. / np.sqrt(var_ref + epsilon) + # run backward + x_grad, scale_grad, bias_grad = _reference_grad( + x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout) + + return y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad + def test_forward_backward(self): def test_with_place(place, data_layout, shape): # attr @@ -295,16 +322,11 @@ class TestBatchNormOpTraining(unittest.TestCase): mean = np.zeros(scale_shape).astype(np.float32) variance = np.ones(scale_shape).astype(np.float32) - # run forward - y, saved_mean, var_ref = _reference_training(x, scale, bias, - epsilon, data_layout) - mean_out = saved_mean * (1. - momentum) + momentum * mean - variance_out = var_ref * (1. - momentum) + momentum * variance - saved_variance = 1. 
/ np.sqrt(var_ref + epsilon) - # run backward y_grad = np.random.random_sample(shape).astype(np.float32) - x_grad, scale_grad, bias_grad = _reference_grad( - x, y_grad, scale, saved_mean, var_ref, epsilon, data_layout) + + y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward( + x, y_grad, scale, bias, mean, variance, epsilon, momentum, + shape, data_layout) var_dict = locals() var_dict['y@GRAD'] = y_grad @@ -344,7 +366,8 @@ class TestBatchNormOpTraining(unittest.TestCase): "momentum": momentum, "epsilon": epsilon, "is_test": False, - "data_layout": data_layout + "data_layout": data_layout, + "use_mkldnn": self.use_mkldnn }) block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) @@ -387,13 +410,17 @@ class TestBatchNormOpTraining(unittest.TestCase): print "op test forward passed: ", str(place), data_layout places = [core.CPUPlace()] + if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"): places.append(core.CUDAPlace(0)) for place in places: - for data_format in ["NCHW", "NHWC"]: + for data_format in self.data_formats: test_with_place(place, data_format, [2, 3, 4, 5]) + def init_kernel_type(self): + pass + if __name__ == '__main__': unittest.main() From 6084af47ef4358ab2b54636aa41d976f2ea34056 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Thu, 3 May 2018 10:28:16 +0800 Subject: [PATCH 48/50] Fix the bug when a input variable of op is dispensable. (#10268) * Fix the bug when a input variable of op is dispensable. * Add HasInputs/Outputs interfaces to OperatorBase. * Remove the unreferenced header file. --- paddle/capi/Matrix.cpp | 2 +- paddle/fluid/framework/operator.cc | 41 +++++++++++++++++++++++++++--- paddle/fluid/framework/operator.h | 2 ++ paddle/fluid/platform/profiler.h | 1 - 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/paddle/capi/Matrix.cpp b/paddle/capi/Matrix.cpp index 24b0020636..733d49cacf 100644 --- a/paddle/capi/Matrix.cpp +++ b/paddle/capi/Matrix.cpp @@ -108,7 +108,7 @@ paddle_error paddle_matrix_get_row(paddle_matrix mat, paddle_error paddle_matrix_get_shape(paddle_matrix mat, uint64_t* height, uint64_t* width) { - if (mat == nullptr) return kPD_NULLPTR; + if (mat == nullptr || cast(mat)->mat == nullptr) return kPD_NULLPTR; if (height != nullptr) { *height = cast(mat)->mat->getHeight(); } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 32576423a6..d70f26026c 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -93,6 +93,14 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { RunImpl(scope, place); } +bool OperatorBase::HasInputs(const std::string& name) const { + if (inputs_.find(name) != inputs_.end()) { + return true; + } else { + return false; + } +} + std::string OperatorBase::Input(const std::string& name) const { auto& ins = Inputs(name); PADDLE_ENFORCE_LE(ins.size(), 1UL, @@ -109,6 +117,14 @@ const std::vector& OperatorBase::Inputs( return it->second; } +bool OperatorBase::HasOutputs(const std::string& name) const { + if (outputs_.find(name) != outputs_.end()) { + return true; + } else { + return false; + } +} + std::string OperatorBase::Output(const std::string& name) const { auto& outs = Outputs(name); PADDLE_ENFORCE_LE(outs.size(), 1UL, @@ -220,13 +236,18 @@ void OperatorBase::CheckAllInputOutputSet() const { if (op_info == nullptr || op_info->proto_ == nullptr) return; for (auto& in : op_info->Proto().inputs()) { - PADDLE_ENFORCE(inputs_.find(in.name()) != 
inputs_.end(), - "Type %s's input %s is not set", Type(), in.name()); + if (!in.dispensable()) { + PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(), + "Operator %s's input, %s, is not set", Type(), in.name()); + } } for (auto& out : op_info->Proto().outputs()) { - PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(), - "Type %s's output %s is not set", Type(), out.name()); + if (!out.dispensable()) { + PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(), + "Operator %s's output, %s, is not set", Type(), + out.name()); + } } } @@ -332,6 +353,9 @@ class RuntimeInferShapeContext : public InferShapeContext { : op_(op), scope_(scope) {} bool HasInput(const std::string& name) const override { + if (!op_.HasInputs(name)) { + return false; + } auto& ins = Inputs(name); size_t length = ins.size(); if (length == 0) { @@ -345,6 +369,9 @@ class RuntimeInferShapeContext : public InferShapeContext { } bool HasOutput(const std::string& name) const override { + if (!op_.HasOutputs(name)) { + return false; + } auto& outs = Outputs(name); size_t length = outs.size(); if (length == 0) { @@ -358,6 +385,9 @@ class RuntimeInferShapeContext : public InferShapeContext { } bool HasInputs(const std::string& name) const override { + if (!op_.HasInputs(name)) { + return false; + } auto inputs = op_.Inputs(name); if (inputs.empty()) { return false; @@ -371,6 +401,9 @@ class RuntimeInferShapeContext : public InferShapeContext { } bool HasOutputs(const std::string& name) const override { + if (!op_.HasOutputs(name)) { + return false; + } auto outputs = op_.Outputs(name); if (outputs.empty()) { return false; diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 826cc57b72..d373c48b1a 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -105,6 +105,7 @@ class OperatorBase { const VariableNameMap& Inputs() const { return inputs_; } const VariableNameMap& Outputs() const { return outputs_; } + bool HasInputs(const std::string& name) const; //! Get a input with argument's name described in `op_proto` std::string Input(const std::string& name) const; //! Get a input which has multiple variables. @@ -112,6 +113,7 @@ class OperatorBase { //! Get all inputs variable names std::vector InputVars() const; + bool HasOutputs(const std::string& name) const; //! Get a output with argument's name described in `op_proto` std::string Output(const std::string& name) const; //! Get an output which has multiple variables. diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index b07427c8f6..428d9ebcea 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -18,7 +18,6 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/profiler.pb.h" namespace paddle { namespace platform { From e9737d600f44b13810c91c497a2ce42d96efddfe Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 3 May 2018 11:03:07 +0800 Subject: [PATCH 49/50] add a private function to find adam opt pass --- python/paddle/fluid/distribute_transpiler.py | 28 +++++++++++--------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 079d90f585..c180e7b210 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -401,11 +401,8 @@ class DistributeTranspiler: # HACK: optimization global ops only used to scale beta1 and beta2 # replace it with dependency engine. for op in self.optimize_ops: - if op.type == "scale": - for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or \ - in_name.startswith("beta2_pow_acc"): - global_ops.append(op) + if self._is_adam_connected_op(op): + global_ops.append(op) def __append_optimize_op__(op, block, grad_to_block_id): if self._is_opt_op(op): @@ -1152,13 +1149,20 @@ class DistributeTranspiler: op.input("Param")[0]), self.origin_program.global_block().var( op.input("Grad")[0]))) - elif op.type == "scale": - # for adam optimize op - for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or \ - in_name.startswith("beta2_pow_acc"): - opt_ops.append(op) - break + elif self._is_adam_connected_op(op): + opt_ops.append(op) else: pass return opt_ops, params_grads + + def _is_adam_connected_op(self, op): + """ + A hack function to determinate whether the input operator + is connected to optimize operator. + """ + if op.type == "scale": + for in_name in op.input_arg_names: + if in_name.startswith("beta1_pow_acc") or \ + in_name.startswith("beta2_pow_acc"): + return True + return False From f63ff90b03b444ff7562bf72fca6877ad7b068a2 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 3 May 2018 11:13:30 +0800 Subject: [PATCH 50/50] Fix/fp64 (#10346) * "fix double type error" * "fix ci" * "softmax fp64" * "fix momentum" * "fix ci" --- paddle/fluid/operators/momentum_op.cc | 8 ++++++++ paddle/fluid/operators/scale_op.cc | 10 ++++------ paddle/fluid/operators/softmax_op.cc | 6 ++++-- paddle/fluid/operators/softmax_op.cu.cc | 6 ++++-- paddle/fluid/operators/top_k_op.cc | 3 ++- paddle/fluid/operators/top_k_op.cu | 3 ++- 6 files changed, 24 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc index 6c70970e15..f13ec53905 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/momentum_op.cc @@ -17,6 +17,8 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +using Tensor = framework::Tensor; + class MomentumOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -50,6 +52,12 @@ class MomentumOp : public framework::OperatorWithKernel { ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("VelocityOut", param_dim); } + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + auto input_data_type = + framework::ToDataType(ctx.Input("Param")->type()); + return framework::OpKernelType(input_data_type, ctx.GetPlace()); + } }; class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index 1e938638c9..7dcf33c989 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -35,7 +35,6 @@ class ScaleOp : public framework::OperatorWithKernel { } }; -template class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) @@ -47,9 +46,9 @@ Scale operator $$Out = scale*X$$ )DOC"); - AddAttr("scale", - "(float, default 1.0)" - "The scaling factor of the scale operator.") + AddAttr("scale", + "(float, default 1.0)" + "The scaling factor of the scale operator.") .SetDefault(1.0); } }; @@ -73,8 +72,7 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker { namespace ops = paddle::operators; -REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, - ops::ScaleGradMaker); +REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, ops::ScaleGradMaker); REGISTER_OP_CPU_KERNEL( scale, ops::ScaleKernel, ops::ScaleKernel, diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 2741ba95bc..aa7b192e32 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -164,7 +164,9 @@ REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad); REGISTER_OP_CPU_KERNEL( - softmax, ops::SoftmaxKernel); + softmax, ops::SoftmaxKernel, + ops::SoftmaxKernel); REGISTER_OP_CPU_KERNEL( softmax_grad, - ops::SoftmaxGradKernel); + ops::SoftmaxGradKernel, + ops::SoftmaxGradKernel); diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc index 0c1f7cef7a..5fb4f011d9 100644 --- a/paddle/fluid/operators/softmax_op.cu.cc +++ b/paddle/fluid/operators/softmax_op.cu.cc @@ -19,6 +19,8 @@ namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( softmax, ops::SoftmaxKernel, + ops::SoftmaxKernel, ops::SoftmaxKernel); -REGISTER_OP_CUDA_KERNEL(softmax_grad, - ops::SoftmaxGradKernel); +REGISTER_OP_CUDA_KERNEL( + softmax_grad, ops::SoftmaxGradKernel, + ops::SoftmaxGradKernel); diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc index 2e4e8caed5..942a5de3f9 100644 --- a/paddle/fluid/operators/top_k_op.cc +++ b/paddle/fluid/operators/top_k_op.cc @@ -75,4 +75,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(top_k, ops::TopkOp, ops::TopkOpMaker, paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL(top_k, - ops::TopkKernel); + ops::TopkKernel, + ops::TopkKernel); diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu index d7f4d383ce..2ea9fd1d29 100644 --- a/paddle/fluid/operators/top_k_op.cu +++ 
b/paddle/fluid/operators/top_k_op.cu
@@ -318,4 +318,5 @@ class TopkOpCUDAKernel : public framework::OpKernel<T> {
 }  // namespace operators
 }  // namespace paddle

-REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>);
+REGISTER_OP_CUDA_KERNEL(top_k, paddle::operators::TopkOpCUDAKernel<float>,
+                        paddle::operators::TopkOpCUDAKernel<double>);
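(Closing aside on PATCH 50: with double kernels now registered, float64 coverage in the Python unit tests is mostly a matter of changing the dtype. A hedged sketch in the style of the project's OpTest harness follows; the test class is illustrative, not part of the patch.)

import unittest
import numpy as np
from op_test import OpTest  # harness used by the unit tests in this series

class TestTopkOpFP64(OpTest):
    def setUp(self):
        self.op_type = "top_k"
        k = 1
        x = np.random.random((32, 84)).astype("float64")
        self.inputs = {'X': x}
        self.attrs = {'k': k}
        # NumPy reference: top-1 value and its index per row
        self.outputs = {
            'Out': -np.sort(-x, axis=1)[:, :k],
            'Indices': np.argsort(-x, axis=1)[:, :k]
        }

    def test_check_output(self):
        self.check_output()

if __name__ == '__main__':
    unittest.main()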