From 79a1a7cda0d45df31f6d4fa60bdc3f4e206e31bc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 18 Apr 2018 17:48:53 +0800 Subject: [PATCH 01/72] init async gprc server --- paddle/fluid/operators/CMakeLists.txt | 2 + .../operators/async_listen_and_serv_op.cc | 217 ++++++++++++ .../operators/async_listen_and_serv_op.h | 55 ++++ paddle/fluid/operators/detail/CMakeLists.txt | 2 +- .../operators/detail/async_grpc_server.cc | 311 ++++++++++++++++++ .../operators/detail/async_grpc_server.h | 111 +++++++ 6 files changed, 697 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/operators/async_listen_and_serv_op.cc create mode 100644 paddle/fluid/operators/async_listen_and_serv_op.h create mode 100644 paddle/fluid/operators/detail/async_grpc_server.cc create mode 100644 paddle/fluid/operators/detail/async_grpc_server.h diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 7d6781c2c3..f723652a8f 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -189,6 +189,8 @@ if(WITH_DISTRIBUTE) set_source_files_properties(recv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(listen_and_serv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + op_library(async_listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) + set_source_files_properties(async_listen_and_serv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_vars_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(send_vars_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_barrier_op DEPS ${DISTRIBUTE_DEPS}) diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc new file mode 100644 index 0000000000..4d66f9853a --- /dev/null +++ b/paddle/fluid/operators/async_listen_and_serv_op.cc @@ -0,0 +1,217 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include // NOLINT +#include + +#include "paddle/fluid/operators/async_listen_and_serv_op.h" + +namespace paddle { +namespace operators { + +void RunServer(std::shared_ptr service) { + service->RunAsyncUpdate(); + VLOG(4) << "RunServer thread end"; +} + +static void CreateTensorFromMessageType(framework::Variable *var, + sendrecv::VarType var_type) { + if (var_type == sendrecv::VarType::LOD_TENSOR) { + var->GetMutable(); + } else if (var_type == sendrecv::VarType::SELECTED_ROWS) { + var->GetMutable(); + } else { + PADDLE_THROW( + "VariableMessage type %d is not in " + "[LoDTensor, SelectedRows]", + var_type); + } +} + +static void ParallelExecuteBlocks( + const std::vector ¶llel_blkids, framework::Executor *executor, + const std::vector> + &prepared, + framework::ProgramDesc *program, framework::Scope *scope) { + std::vector> fs; + for (size_t idx : parallel_blkids) { + fs.push_back( + framework::Async([&executor, &prepared, &program, &scope, idx]() { + int run_block = idx; // thread local + try { + executor->RunPreparedContext(prepared[run_block].get(), scope, + false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + })); + } + for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); +} + +static void AsyncExecuteBlock( + size_t block_id, framework::Executor *executor, + std::shared_ptr ctx, + framework::ProgramDesc *program, framework::Scope *scope) {} + +AsyncListenAndServOp::AsyncListenAndServOp( + const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + +int AsyncListenAndServOp::GetSelectedPort() const { + return rpc_service_->GetSelectedPort(); +} + +void AsyncListenAndServOp::Stop() { + rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); + server_thread_->join(); +} + +void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const { + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + framework::Scope &recv_scope = scope.NewScope(); + + if (!rpc_service_) { + std::string endpoint = Attr("endpoint"); + rpc_service_.reset(new detail::SyncGRPCServer(endpoint)); + } + + auto *optimize_block = Attr(kOptimizeBlock); + auto *prefetch_block = Attr(kPrefetchBlock); + auto *program = optimize_block->Program(); + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + framework::Executor executor(dev_place); + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + if (blkid != static_cast(prefetch_block->ID())) { + block_list.push_back(blkid); + } + } + auto optimize_prepared = executor.Prepare(*program, block_list); + // Insert placeholder for block0 which holds current op itself. + optimize_prepared.insert( + optimize_prepared.begin(), + std::shared_ptr(nullptr)); + + rpc_service_->SetScope(&recv_scope); + rpc_service_->SetDevCtx(&dev_ctx); + // TODO(qiao) set proper fields for table lookup and update + rpc_service_->SetExecutor(&executor); + VLOG(3) << "prefetch block id is " << prefetch_block->ID(); + auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); + rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); + prefetch_prepared.release(); + rpc_service_->SetProgram(program); + // start the server listening after all member initialized. 
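// [editor's note] The lines that follow start RunServer() on a separate thread and then
// wait a fixed five seconds (sleep(5)) before writing the selected port for the Python
// side. A condition-variable handshake would avoid the fixed sleep; the sketch below is
// an illustration only and is not part of this patch: ready_mutex_, ready_cv_, ready_,
// SetReady() and WaitServerReady() are hypothetical members and would need <mutex> and
// <condition_variable>.
//
//   // called from RunAsyncUpdate() once builder.BuildAndStart() has returned:
//   void SyncGRPCServer::SetReady() {
//     std::lock_guard<std::mutex> lock(ready_mutex_);
//     ready_ = true;
//     ready_cv_.notify_all();
//   }
//
//   // called by the operator instead of sleep(5):
//   void SyncGRPCServer::WaitServerReady() {
//     std::unique_lock<std::mutex> lock(ready_mutex_);
//     ready_cv_.wait(lock, [this] { return ready_; });
//   }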
+ server_thread_.reset(new std::thread(RunServer, rpc_service_)); + VLOG(3) << "wait server thread to become ready..."; + sleep(5); + // Write to a file of server selected port for python use. + std::ofstream port_file; + port_file.open("/tmp/paddle.selected_port"); + port_file << rpc_service_->GetSelectedPort(); + port_file.close(); + + bool exit_flag = false; + // Record received sparse variables, so that + // we could reset those after execute optimize program + std::vector sparse_vars; + while (!exit_flag) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; + break; + } else { + VLOG(3) << "received grad: " << recv_var_name; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + if (var->IsType()) { + sparse_vars.push_back(var); + } + AsyncExecuteBlock(); + } + + if (exit_flag) { + rpc_service_->ShutDown(); + break; + } + + // NOTE: if is_gpu_place, CUDA kernels are launched by multiple threads + // and this will still work. + + // The optimize blocks which have the same parent ID would run parallel + // TODO(Yancey1989): need to use ParallelExecutor for future + int32_t last_parent_blkid = program->Block(1).Parent(); + + VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; + + // Reset the received sparse variables, the sum operator would not + // sum the input sparse variables which rows is empty at the next + // mini-batch. + // TODO(Yancey1989): move the reset action into an operator, we couldn't + // have any hide logic in the operator. + for (auto &var : sparse_vars) { + var->GetMutable()->mutable_rows()->clear(); + } + // FIXME(typhoonzero): use another condition to sync wait clients get. + sparse_vars.clear(); + } // while(true) +} + +class AsyncListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { + public: + AsyncListenAndServOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable(); + AddComment(R"DOC( +ListenAndServ operator + +This operator will start a RPC server which can receive variables +from send_op and send back variables to recv_op. +)DOC"); + AddAttr("endpoint", + "(string, default 127.0.0.1:6164)" + "IP address to listen on.") + .SetDefault("127.0.0.1:6164") + .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr(kOptimizeBlock, + "BlockID to run on server side."); + AddAttr(kPrefetchBlock, + "prefetch block to run on server side."); + AddAttr("Fanin", "How many clients send to this server.") + .SetDefault(1); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(async_listen_and_serv, ops::AsyncListenAndServOp, + ops::AsyncListenAndServOpMaker); diff --git a/paddle/fluid/operators/async_listen_and_serv_op.h b/paddle/fluid/operators/async_listen_and_serv_op.h new file mode 100644 index 0000000000..9df351b929 --- /dev/null +++ b/paddle/fluid/operators/async_listen_and_serv_op.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/threadpool.h" +#include "paddle/fluid/operators/detail/async_grpc_server.h" + +namespace paddle { +namespace operators { + +constexpr char kOptimizeBlock[] = "OptimizeBlock"; +constexpr char kPrefetchBlock[] = "PrefetchBlock"; + +void RunServer(std::shared_ptr service); + +class AsyncListenAndServOp : public framework::OperatorBase { + public: + AsyncListenAndServOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs); + + int GetSelectedPort() const; + + void Stop() override; + + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override; + + protected: + mutable std::shared_ptr rpc_service_; + mutable std::shared_ptr server_thread_; +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index 719a7465b8..059099ee0a 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -1,6 +1,6 @@ if(WITH_DISTRIBUTE) grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc - grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) + grpc_server.cc async_grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr diff --git a/paddle/fluid/operators/detail/async_grpc_server.cc b/paddle/fluid/operators/detail/async_grpc_server.cc new file mode 100644 index 0000000000..7e45bc6b2f --- /dev/null +++ b/paddle/fluid/operators/detail/async_grpc_server.cc @@ -0,0 +1,311 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/detail/async_grpc_server.h" + +#include +#include + +using ::grpc::ServerAsyncResponseWriter; + +namespace paddle { +namespace operators { +namespace detail { + +enum CallStatus { PROCESS = 0, FINISH }; + +// reference: +// https://stackoverflow.com/questions/41732884/grpc-multiple-services-in-cpp-async-server +class RequestBase { + public: + explicit RequestBase(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + const platform::DeviceContext* dev_ctx) + : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { + PADDLE_ENFORCE(cq_); + } + virtual ~RequestBase() {} + virtual void Process() { assert(false); } + + CallStatus Status() { return status_; } + void SetStatus(CallStatus status) { status_ = status; } + virtual std::string GetReqName() { + assert(false); + return ""; + } + + protected: + ::grpc::ServerContext ctx_; + GrpcService::AsyncService* service_; + ::grpc::ServerCompletionQueue* cq_; + CallStatus status_; + const platform::DeviceContext* dev_ctx_; +}; + +class RequestSend final : public RequestBase { + public: + explicit RequestSend(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, ReceivedQueue* queue, + const platform::DeviceContext* dev_ctx) + : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { + request_.reset(new VariableResponse(scope, dev_ctx_)); + int method_id = static_cast(detail::GrpcMethod::kSendVariable); + service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, + cq_, cq_, this); + } + + virtual ~RequestSend() {} + + virtual std::string GetReqName() { return request_->Varname(); } + + virtual void Process() { + queue_->Push(std::make_pair(request_->Varname(), request_)); + + sendrecv::VoidMessage reply; + responder_.Finish(reply, ::grpc::Status::OK, this); + status_ = FINISH; + } + + protected: + std::shared_ptr request_; + ReceivedQueue* queue_; + ServerAsyncResponseWriter responder_; +}; + +class RequestGet final : public RequestBase { + public: + explicit RequestGet(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, + const platform::DeviceContext* dev_ctx) + : RequestBase(service, cq, dev_ctx), responder_(&ctx_), scope_(scope) { + int method_id = static_cast(detail::GrpcMethod::kGetVariable); + service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, + cq_, this); + } + + virtual ~RequestGet() {} + + virtual std::string GetReqName() { return request_.varname(); } + + virtual void Process() { + // proc request. 
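// [editor's note] The Get handler below serializes whatever scope_->FindVar() returns,
// and FindVar() yields nullptr for a name that does not exist in the scope. A defensive
// variant (not part of this patch) would mirror the check this series already performs
// in RunImpl before serializing:
//
//   auto* var = scope_->FindVar(var_name);
//   if (var == nullptr) {
//     LOG(ERROR) << "RequestGet can not find server side var: " << var_name;
//     PADDLE_THROW("Can not find server side var");
//   }
//   SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply);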
+ std::string var_name = request_.varname(); + auto* var = scope_->FindVar(var_name); + + ::grpc::ByteBuffer reply; + SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); + + responder_.Finish(reply, ::grpc::Status::OK, this); + status_ = FINISH; + } + + protected: + sendrecv::VariableMessage request_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; + framework::Scope* scope_; +}; + +class RequestPrefetch final : public RequestBase { + public: + explicit RequestPrefetch(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + framework::Scope* scope, + const platform::DeviceContext* dev_ctx, + framework::Executor* executor, + framework::ProgramDesc* program, + framework::ExecutorPrepareContext* prefetch_ctx) + : RequestBase(service, cq, dev_ctx), + responder_(&ctx_), + scope_(scope), + executor_(executor), + program_(program), + prefetch_ctx_(prefetch_ctx) { + request_.reset(new VariableResponse(scope, dev_ctx_)); + int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); + service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, + cq_, cq_, this); + } + + virtual ~RequestPrefetch() {} + + virtual std::string GetReqName() { return request_->Varname(); } + + virtual void Process() { + // prefetch process... + ::grpc::ByteBuffer reply; + + std::string var_name = request_->OutVarname(); + VLOG(3) << "prefetch var " << var_name; + auto var_desc = program_->Block(0).FindVar(var_name); + framework::Scope* local_scope = &scope_->NewScope(); + auto* var = local_scope->FindVar(var_name); + InitializeVariable(var, var_desc->GetType()); + executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false); + + SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); + + responder_.Finish(reply, ::grpc::Status::OK, this); + status_ = FINISH; + } + + protected: + std::shared_ptr request_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; + framework::Scope* scope_; + framework::Executor* executor_; + framework::ProgramDesc* program_; + framework::ExecutorPrepareContext* prefetch_ctx_; +}; + +void SyncGRPCServer::RunAsyncUpdate() { + ::grpc::ServerBuilder builder; + builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), + &selected_port_); + builder.SetMaxSendMessageSize(std::numeric_limits::max()); + builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); + builder.RegisterService(&service_); + + cq_send_ = builder.AddCompletionQueue(); + cq_get_ = builder.AddCompletionQueue(); + cq_prefetch_ = builder.AddCompletionQueue(); + + server_ = builder.BuildAndStart(); + LOG(INFO) << "Server listening on " << address_ + << " selected port: " << selected_port_; + + std::function send_register = + std::bind(&SyncGRPCServer::TryToRegisterNewSendOne, this); + std::function get_register = + std::bind(&SyncGRPCServer::TryToRegisterNewGetOne, this); + std::function prefetch_register = + std::bind(&SyncGRPCServer::TryToRegisterNewPrefetchOne, this); + + // TODO(wuyi): Run these "HandleRequest" in thread pool + t_send_.reset( + new std::thread(std::bind(&SyncGRPCServer::HandleRequest, this, + cq_send_.get(), "cq_send", send_register))); + t_get_.reset( + new std::thread(std::bind(&SyncGRPCServer::HandleRequest, this, + cq_get_.get(), "cq_get", get_register))); + t_prefetch_.reset(new std::thread( + std::bind(&SyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), + "cq_prefetch", prefetch_register))); + // wait server + server_->Wait(); + t_send_->join(); + t_get_->join(); + t_prefetch_->join(); +} + +void 
SyncGRPCServer::ShutdownQueue() { + std::unique_lock lock(cq_mutex_); + cq_send_->Shutdown(); + cq_get_->Shutdown(); + cq_prefetch_->Shutdown(); +} + +// This URL explains why shutdown is complicate: +void SyncGRPCServer::ShutDown() { + is_shut_down_ = true; + ShutdownQueue(); + server_->Shutdown(); +} + +void SyncGRPCServer::TryToRegisterNewSendOne() { + std::unique_lock lock(cq_mutex_); + if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; + return; + } + RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, + &var_recv_queue_, dev_ctx_); + VLOG(4) << "Create RequestSend status:" << send->Status(); +} + +void SyncGRPCServer::TryToRegisterNewGetOne() { + std::unique_lock lock(cq_mutex_); + if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; + return; + } + RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_); + VLOG(4) << "Create RequestGet status:" << get->Status(); +} + +void SyncGRPCServer::TryToRegisterNewPrefetchOne() { + std::unique_lock lock(cq_mutex_); + if (is_shut_down_) { + VLOG(3) << "shutdown, do not TryToRegisterNewPrefetchOne"; + return; + } + RequestPrefetch* prefetch = + new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, + executor_, program_, prefetch_ctx_); + + VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); +} + +void SyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, + const std::string& cq_name, + std::function TryToRegisterNewOne) { + TryToRegisterNewOne(); + + void* tag = NULL; + bool ok = false; + + while (true) { + VLOG(3) << "HandleRequest for " << cq_name << " while in"; + if (!cq->Next(&tag, &ok)) { + LOG(INFO) << cq_name << " CompletionQueue shutdown!"; + break; + } + VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; + + PADDLE_ENFORCE(tag); + + RequestBase* base = reinterpret_cast(tag); + // reference: + // https://github.com/tensorflow/tensorflow/issues/5596 + // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM + // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I + if (!ok) { + LOG(WARNING) << cq_name << " recv no regular event:argument name[" + << base->GetReqName() << "]"; + TryToRegisterNewOne(); + delete base; + continue; + } + + switch (base->Status()) { + case PROCESS: { + VLOG(4) << cq_name << " status:" << base->Status(); + TryToRegisterNewOne(); + base->Process(); + break; + } + case FINISH: { + VLOG(4) << cq_name << " status:" << base->Status(); + delete base; + break; + } + default: { assert(false); } + } + } +} + +} // namespace detail +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/async_grpc_server.h b/paddle/fluid/operators/detail/async_grpc_server.h new file mode 100644 index 0000000000..870b684cfa --- /dev/null +++ b/paddle/fluid/operators/detail/async_grpc_server.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include // NOLINT +#include + +#include "grpc++/grpc++.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/detail/grpc_service.h" +#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/detail/send_recv.pb.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/simple_block_queue.h" + +namespace paddle { +namespace operators { +namespace detail { + +typedef std::pair> + ReceivedMessage; +typedef SimpleBlockQueue ReceivedQueue; + +typedef std::pair MessageWithName; +class RequestBase; + +class SyncGRPCServer final { + public: + explicit SyncGRPCServer(const std::string &address) : address_(address) {} + + void RunAsyncUpdate(); + + void SetScope(framework::Scope *scope) { scope_ = scope; } + + void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } + + void SetProgram(framework::ProgramDesc *program) { program_ = program; } + + void SetExecutor(framework::Executor *executor) { executor_ = executor; } + + void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { + prefetch_ctx_ = prepared; + } + + int GetSelectedPort() { return selected_port_; } + + const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } + + void Push(const std::string &msg_name) { + this->var_recv_queue_.Push(std::make_pair(msg_name, nullptr)); + } + + void ShutDown(); + + protected: + void HandleRequest(::grpc::ServerCompletionQueue *cq, + const std::string &cq_name, + std::function TryToRegisterNewOne); + void TryToRegisterNewSendOne(); + void TryToRegisterNewGetOne(); + void TryToRegisterNewPrefetchOne(); + void ShutdownQueue(); + + private: + std::mutex cq_mutex_; + volatile bool is_shut_down_ = false; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; + std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_; + + GrpcService::AsyncService service_; + std::unique_ptr<::grpc::Server> server_; + + std::string address_; + framework::Scope *scope_; + const platform::DeviceContext *dev_ctx_; + + // client send variable to this queue. + ReceivedQueue var_recv_queue_; + + std::unique_ptr t_send_; + std::unique_ptr t_get_; + std::unique_ptr t_prefetch_; + + framework::ExecutorPrepareContext *prefetch_ctx_; + framework::ProgramDesc *program_; + framework::Executor *executor_; + int selected_port_; +}; + +}; // namespace detail +}; // namespace operators +}; // namespace paddle From 1a43828780943569b558043bac4c0170e5d962a1 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 18 Apr 2018 23:42:52 +0800 Subject: [PATCH 02/72] implement main logic --- .../operators/async_listen_and_serv_op.cc | 109 +++++++----------- 1 file changed, 42 insertions(+), 67 deletions(-) diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc index 4d66f9853a..ec0ddedf3d 100644 --- a/paddle/fluid/operators/async_listen_and_serv_op.cc +++ b/paddle/fluid/operators/async_listen_and_serv_op.cc @@ -19,6 +19,8 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/async_listen_and_serv_op.h" +#include "paddle/utils/StringUtil.h" + namespace paddle { namespace operators { @@ -27,46 +29,18 @@ void RunServer(std::shared_ptr service) { VLOG(4) << "RunServer thread end"; } -static void CreateTensorFromMessageType(framework::Variable *var, - sendrecv::VarType var_type) { - if (var_type == sendrecv::VarType::LOD_TENSOR) { - var->GetMutable(); - } else if (var_type == sendrecv::VarType::SELECTED_ROWS) { - var->GetMutable(); - } else { - PADDLE_THROW( - "VariableMessage type %d is not in " - "[LoDTensor, SelectedRows]", - var_type); - } -} - -static void ParallelExecuteBlocks( - const std::vector ¶llel_blkids, framework::Executor *executor, - const std::vector> - &prepared, - framework::ProgramDesc *program, framework::Scope *scope) { - std::vector> fs; - for (size_t idx : parallel_blkids) { - fs.push_back( - framework::Async([&executor, &prepared, &program, &scope, idx]() { - int run_block = idx; // thread local - try { - executor->RunPreparedContext(prepared[run_block].get(), scope, - false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - })); - } - for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); +static void AsyncExecuteBlock(framework::Executor *executor, + framework::ExecutorPrepareContext *prepared, + framework::Scope *scope) { + framework::Async([&executor, &prepared, &scope]() { + try { + executor->RunPreparedContext(prepared, scope, false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + }); } -static void AsyncExecuteBlock( - size_t block_id, framework::Executor *executor, - std::shared_ptr ctx, - framework::ProgramDesc *program, framework::Scope *scope) {} - AsyncListenAndServOp::AsyncListenAndServOp( const std::string &type, const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, @@ -93,6 +67,21 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, rpc_service_.reset(new detail::SyncGRPCServer(endpoint)); } + // grad name to block id + std::unordered_map grad_to_id; + std::unordered_map id_to_grad; + + auto grad_map_str = Attr>("grad_map"); + for (auto &grad_and_id : grad_map_str) { + std::vector pieces; + paddle::str::split(grad_and_id, ' ', &pieces); + PADDLE_ENFORCE_EQ(pieces.size(), 2); + PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); + grad_to_id[pieces[0]] = block_id; + id_to_grad[block_id] = pieces[0]; + } + auto *optimize_block = Attr(kOptimizeBlock); auto *prefetch_block = Attr(kPrefetchBlock); auto *program = optimize_block->Program(); @@ -108,10 +97,13 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, } } auto optimize_prepared = executor.Prepare(*program, block_list); - // Insert placeholder for block0 which holds current op itself. - optimize_prepared.insert( - optimize_prepared.begin(), - std::shared_ptr(nullptr)); + + std::unordered_map> + grad_to_prepared; + for (size_t i = 0; i < block_list.size(); ++i) { + grad_to_prepared[id_to_grad[block_list[i]]] = optimize_prepared[i]; + } rpc_service_->SetScope(&recv_scope); rpc_service_->SetDevCtx(&dev_ctx); @@ -122,6 +114,7 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); prefetch_prepared.release(); rpc_service_->SetProgram(program); + // start the server listening after all member initialized. 
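// [editor's note] This patch keys the optimize blocks by gradient name: each entry of the
// "grad_map" attribute is split on a single space and must yield exactly two pieces (see
// the PADDLE_ENFORCE_EQ(pieces.size(), 2) above), so an entry is a "<grad name> <block id>"
// pair such as "param1@GRAD 1", even though the attribute's doc string writes it as
// "param1@GRAD.block0:1". (As written, the name registered by AddAttr also appears to
// contain that example text while the code reads Attr("grad_map"); presumably the two are
// meant to match.) After parsing, grad_to_prepared["param1@GRAD"] holds the prepared
// context of block 1, and the receive loop below runs exactly that block whenever the
// gradient "param1@GRAD" arrives, with no barrier across trainers. Illustration only; the
// entry string is hypothetical:
//
//   std::vector<std::string> pieces;
//   paddle::str::split("param1@GRAD 1", ' ', &pieces);  // pieces == {"param1@GRAD", "1"}
//   grad_to_id[pieces[0]] = std::stoi(pieces[1]);       // grad_to_id["param1@GRAD"] == 1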
server_thread_.reset(new std::thread(RunServer, rpc_service_)); VLOG(3) << "wait server thread to become ready..."; @@ -133,9 +126,6 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, port_file.close(); bool exit_flag = false; - // Record received sparse variables, so that - // we could reset those after execute optimize program - std::vector sparse_vars; while (!exit_flag) { const detail::ReceivedMessage v = rpc_service_->Get(); auto recv_var_name = v.first; @@ -150,36 +140,17 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, LOG(ERROR) << "Can not find server side var: " << recv_var_name; PADDLE_THROW("Can not find server side var"); } + AsyncExecuteBlock(&executor, grad_to_prepared[recv_var_name].get(), + &recv_scope); if (var->IsType()) { - sparse_vars.push_back(var); + var->GetMutable()->mutable_rows()->clear(); } - AsyncExecuteBlock(); } if (exit_flag) { rpc_service_->ShutDown(); break; } - - // NOTE: if is_gpu_place, CUDA kernels are launched by multiple threads - // and this will still work. - - // The optimize blocks which have the same parent ID would run parallel - // TODO(Yancey1989): need to use ParallelExecutor for future - int32_t last_parent_blkid = program->Block(1).Parent(); - - VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; - - // Reset the received sparse variables, the sum operator would not - // sum the input sparse variables which rows is empty at the next - // mini-batch. - // TODO(Yancey1989): move the reset action into an operator, we couldn't - // have any hide logic in the operator. - for (auto &var : sparse_vars) { - var->GetMutable()->mutable_rows()->clear(); - } - // FIXME(typhoonzero): use another condition to sync wait clients get. - sparse_vars.clear(); } // while(true) } @@ -199,6 +170,10 @@ from send_op and send back variables to recv_op. 
"IP address to listen on.") .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr>( + "grad_map(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", + "a map from grad name to it's optimize block id") + .SetDefault({}); AddAttr(kOptimizeBlock, "BlockID to run on server side."); AddAttr(kPrefetchBlock, From e84f353e1a4b7f1d00eaeb3f0c9814f1f7a433d3 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 19 Apr 2018 13:18:48 +0800 Subject: [PATCH 03/72] optimize --- paddle/fluid/operators/async_listen_and_serv_op.cc | 6 +++++- paddle/fluid/operators/detail/async_grpc_server.cc | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc index ec0ddedf3d..bdb20240f6 100644 --- a/paddle/fluid/operators/async_listen_and_serv_op.cc +++ b/paddle/fluid/operators/async_listen_and_serv_op.cc @@ -96,6 +96,8 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, block_list.push_back(blkid); } } + PADDLE_ENFORCE_EQ(grad_map_str.size(), block_list.size(), + "grad num should be equal to optimize block num"); auto optimize_prepared = executor.Prepare(*program, block_list); std::unordered_mapSetScope(&recv_scope); rpc_service_->SetDevCtx(&dev_ctx); - // TODO(qiao) set proper fields for table lookup and update + + // set proper fields for table lookup and update rpc_service_->SetExecutor(&executor); VLOG(3) << "prefetch block id is " << prefetch_block->ID(); auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); @@ -142,6 +145,7 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, } AsyncExecuteBlock(&executor, grad_to_prepared[recv_var_name].get(), &recv_scope); + // TODO(qiao): explain why if (var->IsType()) { var->GetMutable()->mutable_rows()->clear(); } diff --git a/paddle/fluid/operators/detail/async_grpc_server.cc b/paddle/fluid/operators/detail/async_grpc_server.cc index 7e45bc6b2f..b9a228e1d2 100644 --- a/paddle/fluid/operators/detail/async_grpc_server.cc +++ b/paddle/fluid/operators/detail/async_grpc_server.cc @@ -91,7 +91,7 @@ class RequestGet final : public RequestBase { framework::Scope* scope, const platform::DeviceContext* dev_ctx) : RequestBase(service, cq, dev_ctx), responder_(&ctx_), scope_(scope) { - int method_id = static_cast(detail::GrpcMethod::kGetVariable); + auto method_id = static_cast(detail::GrpcMethod::kGetVariable); service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, cq_, this); } From d002aa7abf9deabeddbfb611bdc2676cad551dac Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 11:02:57 +0800 Subject: [PATCH 04/72] update --- paddle/fluid/operators/detail/async_grpc_server.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/detail/async_grpc_server.cc b/paddle/fluid/operators/detail/async_grpc_server.cc index b9a228e1d2..fb0258f942 100644 --- a/paddle/fluid/operators/detail/async_grpc_server.cc +++ b/paddle/fluid/operators/detail/async_grpc_server.cc @@ -60,7 +60,7 @@ class RequestSend final : public RequestBase { framework::Scope* scope, ReceivedQueue* queue, const platform::DeviceContext* dev_ctx) : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + request_.reset(new VariableResponse(true, scope, dev_ctx_)); int method_id = static_cast(detail::GrpcMethod::kSendVariable); service_->RequestAsyncUnary(method_id, 
&ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -133,7 +133,7 @@ class RequestPrefetch final : public RequestBase { executor_(executor), program_(program), prefetch_ctx_(prefetch_ctx) { - request_.reset(new VariableResponse(scope, dev_ctx_)); + request_.reset(new VariableResponse(true, scope, dev_ctx_)); int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); From 1e30c41e7be026a7702ecd4db1ee031de7d5d0b6 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 11:54:38 +0800 Subject: [PATCH 05/72] add split string --- .../operators/async_listen_and_serv_op.cc | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc index bdb20240f6..14d9121eff 100644 --- a/paddle/fluid/operators/async_listen_and_serv_op.cc +++ b/paddle/fluid/operators/async_listen_and_serv_op.cc @@ -24,6 +24,24 @@ limitations under the License. */ namespace paddle { namespace operators { +static void split(const std::string &str, char sep, + std::vector *pieces) { + pieces->clear(); + if (str.empty()) { + return; + } + size_t pos = 0; + size_t next = str.find(sep, pos); + while (next != std::string::npos) { + pieces->push_back(str.substr(pos, next - pos)); + pos = next + 1; + next = str.find(sep, pos); + } + if (!str.substr(pos).empty()) { + pieces->push_back(str.substr(pos)); + } +} + void RunServer(std::shared_ptr service) { service->RunAsyncUpdate(); VLOG(4) << "RunServer thread end"; @@ -74,7 +92,7 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, auto grad_map_str = Attr>("grad_map"); for (auto &grad_and_id : grad_map_str) { std::vector pieces; - paddle::str::split(grad_and_id, ' ', &pieces); + split(grad_and_id, ' ', &pieces); PADDLE_ENFORCE_EQ(pieces.size(), 2); PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); int block_id = std::stoi(pieces[1]); From 0a881a1ecfd1a71303fc09b1ba7f566e2044e16c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 16:37:23 +0800 Subject: [PATCH 06/72] init RunAsyncUpdate --- paddle/fluid/operators/listen_and_serv_op.cc | 112 ++++++++++++++++++- paddle/fluid/operators/listen_and_serv_op.h | 5 + 2 files changed, 116 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 49e191b7bb..10064357ea 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -27,6 +27,36 @@ void RunServer(std::shared_ptr service) { VLOG(4) << "RunServer thread end"; } +static void split(const std::string &str, char sep, + std::vector *pieces) { + pieces->clear(); + if (str.empty()) { + return; + } + size_t pos = 0; + size_t next = str.find(sep, pos); + while (next != std::string::npos) { + pieces->push_back(str.substr(pos, next - pos)); + pos = next + 1; + next = str.find(sep, pos); + } + if (!str.substr(pos).empty()) { + pieces->push_back(str.substr(pos)); + } +} + +static void AsyncExecuteBlock(framework::Executor *executor, + framework::ExecutorPrepareContext *prepared, + framework::Scope *scope) { + framework::Async([&executor, &prepared, &scope]() { + try { + executor->RunPreparedContext(prepared, scope, false, false); + } catch (std::exception &e) { + LOG(ERROR) << "run sub program error " << e.what(); + } + }); +} + static void ParallelExecuteBlocks( const std::vector ¶llel_blkids, 
framework::Executor *executor, const std::vector> @@ -168,12 +198,82 @@ void ListenAndServOp::RunSyncUpdate( } // while(true) } +void ListenAndServOp::RunAsyncUpdate( + framework::Executor *executor, framework::ProgramDesc *program, + framework::Scope *recv_scope, framework::BlockDesc *prefetch_block) const { + // grad name to block id + std::unordered_map grad_to_id; + std::unordered_map id_to_grad; + + auto grad_map_str = Attr>("grad_map"); + for (auto &grad_and_id : grad_map_str) { + std::vector pieces; + split(grad_and_id, ' ', &pieces); + PADDLE_ENFORCE_EQ(pieces.size(), 2); + PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); + grad_to_id[pieces[0]] = block_id; + id_to_grad[block_id] = pieces[0]; + } + size_t num_blocks = program->Size(); + PADDLE_ENFORCE_GE(num_blocks, 2, + "server program should have at least 2 blocks"); + + std::vector block_list; + for (size_t blkid = 1; blkid < num_blocks; ++blkid) { + if (blkid != static_cast(prefetch_block->ID())) { + block_list.push_back(blkid); + } + } + PADDLE_ENFORCE_EQ(grad_map_str.size(), block_list.size(), + "grad num should be equal to optimize block num"); + auto optimize_prepared = executor->Prepare(*program, block_list); + + std::unordered_map> + grad_to_prepared; + for (size_t i = 0; i < block_list.size(); ++i) { + grad_to_prepared[id_to_grad[block_list[i]]] = optimize_prepared[i]; + } + + bool exit_flag = false; + while (!exit_flag) { + const detail::ReceivedMessage v = rpc_service_->Get(); + auto recv_var_name = v.first; + if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { + LOG(INFO) << "received terminate message and exit"; + exit_flag = true; + break; + } else { + VLOG(3) << "received grad: " << recv_var_name; + auto var = v.second->GetVar(); + if (var == nullptr) { + LOG(ERROR) << "Can not find server side var: " << recv_var_name; + PADDLE_THROW("Can not find server side var"); + } + AsyncExecuteBlock(executor, grad_to_prepared[recv_var_name].get(), + recv_scope); + // TODO(qiao): explain why + if (var->IsType()) { + var->GetMutable()->mutable_rows()->clear(); + } + } + + if (exit_flag) { + rpc_service_->ShutDown(); + break; + } + } // while(true) +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(dev_place); framework::Scope &recv_scope = scope.NewScope(); + bool sync_mode = Attr("sync_mode"); + PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); @@ -201,7 +301,11 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, sleep(5); // Write to a file of server selected port for python use. SavePort(rpc_service_); - RunSyncUpdate(&executor, program, &recv_scope, prefetch_block); + if (sync_mode) { + RunSyncUpdate(&executor, program, &recv_scope, prefetch_block); + } else { + RunAsyncUpdate(&executor, program, &recv_scope, prefetch_block); + } } class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { @@ -220,6 +324,12 @@ from send_op and send back variables to recv_op. 
"IP address to listen on.") .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); + AddAttr>( + "grad_map(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", + "a map from grad name to it's optimize block id") + .SetDefault({}); + AddAttr("sync_mode", "if works at sync_mode or not") + .SetDefault(false); AddAttr(kOptimizeBlock, "BlockID to run on server side."); AddAttr(kPrefetchBlock, diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 68c8da4d38..9b915a5941 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -46,6 +46,11 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; + void RunAsyncUpdate(framework::Executor* executor, + framework::ProgramDesc* program, + framework::Scope* recv_scope, + framework::BlockDesc* prefetch_block) const; + void Stop() override; void RunImpl(const framework::Scope& scope, From e2ace032ae74cbd98a1d897687a91014fd1a0c67 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 17:15:40 +0800 Subject: [PATCH 07/72] rename RunAsyncUpdate to RunAsyncLoop --- paddle/fluid/operators/listen_and_serv_op.cc | 9 +++++---- paddle/fluid/operators/listen_and_serv_op.h | 8 ++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 7e2f8e74d6..e5e447164f 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -199,9 +199,10 @@ void ListenAndServOp::RunSyncLoop(framework::Executor *executor, } // while(true) } -void ListenAndServOp::RunAsyncUpdate( - framework::Executor *executor, framework::ProgramDesc *program, - framework::Scope *recv_scope, framework::BlockDesc *prefetch_block) const { +void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, + framework::ProgramDesc *program, + framework::Scope *recv_scope, + framework::BlockDesc *prefetch_block) const { // grad name to block id std::unordered_map grad_to_id; std::unordered_map id_to_grad; @@ -305,7 +306,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, if (sync_mode) { RunSyncLoop(&executor, program, &recv_scope, prefetch_block); } else { - RunAsyncUpdate(&executor, program, &recv_scope, prefetch_block); + RunAsyncLoop(&executor, program, &recv_scope, prefetch_block); } } diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 2372e3cf3e..3cc0f30477 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -46,10 +46,10 @@ class ListenAndServOp : public framework::OperatorBase { framework::Scope* recv_scope, framework::BlockDesc* prefetch_block) const; - void RunAsyncUpdate(framework::Executor* executor, - framework::ProgramDesc* program, - framework::Scope* recv_scope, - framework::BlockDesc* prefetch_block) const; + void RunAsyncLoop(framework::Executor* executor, + framework::ProgramDesc* program, + framework::Scope* recv_scope, + framework::BlockDesc* prefetch_block) const; void Stop() override; From 63fbdcf979e155afa45e00d5291357d7a5accb01 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 17:31:25 +0800 Subject: [PATCH 08/72] update send_recv_op_test --- paddle/fluid/operators/send_recv_op_test.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 81350fee38..f247583ce6 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,6 +137,8 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); + attrs.insert({"grad_map", {}}); + attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); listen_and_serv_op->Run(scope, place); From 260bf5aceba7ef4a54c144d7397ad470c48b18e4 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 18:22:35 +0800 Subject: [PATCH 09/72] add sync_mode --- paddle/fluid/operators/detail/grpc_server.cc | 48 ++++++++++++------- paddle/fluid/operators/detail/grpc_server.h | 4 +- .../operators/detail/grpc_server_test.cc | 2 +- paddle/fluid/operators/listen_and_serv_op.cc | 3 +- 4 files changed, 37 insertions(+), 20 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index b92dc59491..be14de9f5e 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -30,9 +30,13 @@ enum CallStatus { PROCESS = 0, FINISH }; class RequestBase { public: explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, const platform::DeviceContext* dev_ctx) - : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { + : service_(service), + cq_(cq), + sync_mode_(sync_mode), + status_(PROCESS), + dev_ctx_(dev_ctx) { PADDLE_ENFORCE(cq_); } virtual ~RequestBase() {} @@ -49,6 +53,7 @@ class RequestBase { ::grpc::ServerContext ctx_; GrpcService::AsyncService* service_; ::grpc::ServerCompletionQueue* cq_; + const bool sync_mode_; CallStatus status_; const platform::DeviceContext* dev_ctx_; }; @@ -56,11 +61,17 @@ class RequestBase { class RequestSend final : public RequestBase { public: explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, ReceivedQueue* queue, const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { - request_.reset(new VariableResponse(false, scope, dev_ctx_)); + : RequestBase(service, cq, sync_mode, dev_ctx), + queue_(queue), + responder_(&ctx_) { + if (sync_mode_) { + request_.reset(new VariableResponse(false, scope, dev_ctx_)); + } else { + request_.reset(new VariableResponse(true, scope, dev_ctx_)); + } int method_id = static_cast(detail::GrpcMethod::kSendVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -87,11 +98,11 @@ class RequestSend final : public RequestBase { class RequestGet final : public RequestBase { public: explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, SimpleBlockQueue* queue) - : RequestBase(service, cq, dev_ctx), + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), queue_(queue) { @@ -134,19 +145,23 @@ class RequestGet final : public RequestBase { class RequestPrefetch final : public RequestBase { public: explicit 
RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, + ::grpc::ServerCompletionQueue* cq, bool sync_mode, framework::Scope* scope, const platform::DeviceContext* dev_ctx, framework::Executor* executor, framework::ProgramDesc* program, framework::ExecutorPrepareContext* prefetch_ctx) - : RequestBase(service, cq, dev_ctx), + : RequestBase(service, cq, sync_mode, dev_ctx), responder_(&ctx_), scope_(scope), executor_(executor), program_(program), prefetch_ctx_(prefetch_ctx) { - request_.reset(new VariableResponse(false, scope, dev_ctx_)); + if (sync_mode_) { + request_.reset(new VariableResponse(false, scope, dev_ctx_)); + } else { + request_.reset(new VariableResponse(true, scope, dev_ctx_)); + } int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, cq_, cq_, this); @@ -181,7 +196,6 @@ class RequestPrefetch final : public RequestBase { framework::Executor* executor_; framework::ProgramDesc* program_; framework::ExecutorPrepareContext* prefetch_ctx_; - int blkid_; }; void AsyncGRPCServer::WaitClientGet(int count) { @@ -254,8 +268,8 @@ void AsyncGRPCServer::TryToRegisterNewSendOne() { VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; return; } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, - &var_recv_queue_, dev_ctx_); + RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_, + scope_, &var_recv_queue_, dev_ctx_); VLOG(4) << "Create RequestSend status:" << send->Status(); } @@ -265,8 +279,8 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() { VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; return; } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_, - &var_get_queue_); + RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_, + dev_ctx_, &var_get_queue_); VLOG(4) << "Create RequestGet status:" << get->Status(); } @@ -277,8 +291,8 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { return; } RequestPrefetch* prefetch = - new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, - executor_, program_, prefetch_ctx_); + new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_, + dev_ctx_, executor_, program_, prefetch_ctx_); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); } diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 452ff5e967..ae660ef480 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -44,7 +44,8 @@ class RequestBase; class AsyncGRPCServer final { public: - explicit AsyncGRPCServer(const std::string &address) : address_(address) {} + explicit AsyncGRPCServer(const std::string &address, bool sync_mode) + : address_(address), sync_mode_(sync_mode) {} void RunSyncUpdate(); @@ -95,6 +96,7 @@ class AsyncGRPCServer final { std::unique_ptr<::grpc::Server> server_; std::string address_; + const bool sync_mode_; framework::Scope *scope_; const platform::DeviceContext *dev_ctx_; diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index c51933718f..25b95d608d 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -89,7 +89,7 @@ void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, } void StartServer(const std::string& endpoint) { - rpc_service_.reset(new 
detail::AsyncGRPCServer(endpoint)); + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true)); framework::ProgramDesc program; framework::Scope scope; platform::CPUPlace place; diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index e5e447164f..db8a0cd631 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -278,7 +278,8 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); + + rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode)); auto *optimize_block = Attr(kOptimizeBlock); auto *prefetch_block = Attr(kPrefetchBlock); From dc3d2dc8ffa4b1243e381bb582fc094940d57cb9 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 18:31:52 +0800 Subject: [PATCH 10/72] rename grad_map to grad_to_id --- paddle/fluid/operators/async_listen_and_serv_op.cc | 4 ++-- paddle/fluid/operators/listen_and_serv_op.cc | 8 ++++---- paddle/fluid/operators/send_recv_op_test.cc | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc index 14d9121eff..093d44e2d1 100644 --- a/paddle/fluid/operators/async_listen_and_serv_op.cc +++ b/paddle/fluid/operators/async_listen_and_serv_op.cc @@ -89,7 +89,7 @@ void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, std::unordered_map grad_to_id; std::unordered_map id_to_grad; - auto grad_map_str = Attr>("grad_map"); + auto grad_map_str = Attr>("grad_to_id"); for (auto &grad_and_id : grad_map_str) { std::vector pieces; split(grad_and_id, ' ', &pieces); @@ -193,7 +193,7 @@ from send_op and send back variables to recv_op. .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr>( - "grad_map(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", + "grad_to_id(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", "a map from grad name to it's optimize block id") .SetDefault({}); AddAttr(kOptimizeBlock, diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index db8a0cd631..d5cb165351 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -207,8 +207,8 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, std::unordered_map grad_to_id; std::unordered_map id_to_grad; - auto grad_map_str = Attr>("grad_map"); - for (auto &grad_and_id : grad_map_str) { + auto grad_to_id_str = Attr>("grad_to_id"); + for (auto &grad_and_id : grad_to_id_str) { std::vector pieces; split(grad_and_id, ' ', &pieces); PADDLE_ENFORCE_EQ(pieces.size(), 2); @@ -227,7 +227,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, block_list.push_back(blkid); } } - PADDLE_ENFORCE_EQ(grad_map_str.size(), block_list.size(), + PADDLE_ENFORCE_EQ(grad_to_id_str.size(), block_list.size(), "grad num should be equal to optimize block num"); auto optimize_prepared = executor->Prepare(*program, block_list); @@ -328,7 +328,7 @@ from send_op and send back variables to recv_op. 
.SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr>( - "grad_map(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", + "grad_to_id(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", "a map from grad name to it's optimize block id") .SetDefault({}); AddAttr("sync_mode", "if works at sync_mode or not") diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index f247583ce6..2b440fe2de 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,7 +137,7 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); - attrs.insert({"grad_map", {}}); + attrs.insert({"grad_to_id", {}}); attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); From 0763ae9a1a3d4b262f3c43b8398c1b5a75e1185c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 20 Apr 2018 18:36:19 +0800 Subject: [PATCH 11/72] remove unused file --- .../operators/async_listen_and_serv_op.cc | 214 ------------ .../operators/async_listen_and_serv_op.h | 55 ---- .../operators/detail/async_grpc_server.cc | 311 ------------------ .../operators/detail/async_grpc_server.h | 111 ------- 4 files changed, 691 deletions(-) delete mode 100644 paddle/fluid/operators/async_listen_and_serv_op.cc delete mode 100644 paddle/fluid/operators/async_listen_and_serv_op.h delete mode 100644 paddle/fluid/operators/detail/async_grpc_server.cc delete mode 100644 paddle/fluid/operators/detail/async_grpc_server.h diff --git a/paddle/fluid/operators/async_listen_and_serv_op.cc b/paddle/fluid/operators/async_listen_and_serv_op.cc deleted file mode 100644 index 093d44e2d1..0000000000 --- a/paddle/fluid/operators/async_listen_and_serv_op.cc +++ /dev/null @@ -1,214 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include -#include -#include // NOLINT -#include - -#include "paddle/fluid/operators/async_listen_and_serv_op.h" - -#include "paddle/utils/StringUtil.h" - -namespace paddle { -namespace operators { - -static void split(const std::string &str, char sep, - std::vector *pieces) { - pieces->clear(); - if (str.empty()) { - return; - } - size_t pos = 0; - size_t next = str.find(sep, pos); - while (next != std::string::npos) { - pieces->push_back(str.substr(pos, next - pos)); - pos = next + 1; - next = str.find(sep, pos); - } - if (!str.substr(pos).empty()) { - pieces->push_back(str.substr(pos)); - } -} - -void RunServer(std::shared_ptr service) { - service->RunAsyncUpdate(); - VLOG(4) << "RunServer thread end"; -} - -static void AsyncExecuteBlock(framework::Executor *executor, - framework::ExecutorPrepareContext *prepared, - framework::Scope *scope) { - framework::Async([&executor, &prepared, &scope]() { - try { - executor->RunPreparedContext(prepared, scope, false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - }); -} - -AsyncListenAndServOp::AsyncListenAndServOp( - const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - -int AsyncListenAndServOp::GetSelectedPort() const { - return rpc_service_->GetSelectedPort(); -} - -void AsyncListenAndServOp::Stop() { - rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); - server_thread_->join(); -} - -void AsyncListenAndServOp::RunImpl(const framework::Scope &scope, - const platform::Place &dev_place) const { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - framework::Scope &recv_scope = scope.NewScope(); - - if (!rpc_service_) { - std::string endpoint = Attr("endpoint"); - rpc_service_.reset(new detail::SyncGRPCServer(endpoint)); - } - - // grad name to block id - std::unordered_map grad_to_id; - std::unordered_map id_to_grad; - - auto grad_map_str = Attr>("grad_to_id"); - for (auto &grad_and_id : grad_map_str) { - std::vector pieces; - split(grad_and_id, ' ', &pieces); - PADDLE_ENFORCE_EQ(pieces.size(), 2); - PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); - int block_id = std::stoi(pieces[1]); - grad_to_id[pieces[0]] = block_id; - id_to_grad[block_id] = pieces[0]; - } - - auto *optimize_block = Attr(kOptimizeBlock); - auto *prefetch_block = Attr(kPrefetchBlock); - auto *program = optimize_block->Program(); - size_t num_blocks = program->Size(); - PADDLE_ENFORCE_GE(num_blocks, 2, - "server program should have at least 2 blocks"); - - framework::Executor executor(dev_place); - std::vector block_list; - for (size_t blkid = 1; blkid < num_blocks; ++blkid) { - if (blkid != static_cast(prefetch_block->ID())) { - block_list.push_back(blkid); - } - } - PADDLE_ENFORCE_EQ(grad_map_str.size(), block_list.size(), - "grad num should be equal to optimize block num"); - auto optimize_prepared = executor.Prepare(*program, block_list); - - std::unordered_map> - grad_to_prepared; - for (size_t i = 0; i < block_list.size(); ++i) { - grad_to_prepared[id_to_grad[block_list[i]]] = optimize_prepared[i]; - } - - rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - - // set proper fields for table lookup and update - rpc_service_->SetExecutor(&executor); - VLOG(3) << "prefetch block id is " << prefetch_block->ID(); - auto prefetch_prepared = executor.Prepare(*program, 
prefetch_block->ID()); - rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); - prefetch_prepared.release(); - rpc_service_->SetProgram(program); - - // start the server listening after all member initialized. - server_thread_.reset(new std::thread(RunServer, rpc_service_)); - VLOG(3) << "wait server thread to become ready..."; - sleep(5); - // Write to a file of server selected port for python use. - std::ofstream port_file; - port_file.open("/tmp/paddle.selected_port"); - port_file << rpc_service_->GetSelectedPort(); - port_file.close(); - - bool exit_flag = false; - while (!exit_flag) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } else { - VLOG(3) << "received grad: " << recv_var_name; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - AsyncExecuteBlock(&executor, grad_to_prepared[recv_var_name].get(), - &recv_scope); - // TODO(qiao): explain why - if (var->IsType()) { - var->GetMutable()->mutable_rows()->clear(); - } - } - - if (exit_flag) { - rpc_service_->ShutDown(); - break; - } - } // while(true) -} - -class AsyncListenAndServOpMaker : public framework::OpProtoAndCheckerMaker { - public: - AsyncListenAndServOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable(); - AddComment(R"DOC( -ListenAndServ operator - -This operator will start a RPC server which can receive variables -from send_op and send back variables to recv_op. -)DOC"); - AddAttr("endpoint", - "(string, default 127.0.0.1:6164)" - "IP address to listen on.") - .SetDefault("127.0.0.1:6164") - .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); - AddAttr>( - "grad_to_id(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", - "a map from grad name to it's optimize block id") - .SetDefault({}); - AddAttr(kOptimizeBlock, - "BlockID to run on server side."); - AddAttr(kPrefetchBlock, - "prefetch block to run on server side."); - AddAttr("Fanin", "How many clients send to this server.") - .SetDefault(1); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -REGISTER_OPERATOR(async_listen_and_serv, ops::AsyncListenAndServOp, - ops::AsyncListenAndServOpMaker); diff --git a/paddle/fluid/operators/async_listen_and_serv_op.h b/paddle/fluid/operators/async_listen_and_serv_op.h deleted file mode 100644 index 9df351b929..0000000000 --- a/paddle/fluid/operators/async_listen_and_serv_op.h +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include -#include - -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/threadpool.h" -#include "paddle/fluid/operators/detail/async_grpc_server.h" - -namespace paddle { -namespace operators { - -constexpr char kOptimizeBlock[] = "OptimizeBlock"; -constexpr char kPrefetchBlock[] = "PrefetchBlock"; - -void RunServer(std::shared_ptr service); - -class AsyncListenAndServOp : public framework::OperatorBase { - public: - AsyncListenAndServOp(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs); - - int GetSelectedPort() const; - - void Stop() override; - - void RunImpl(const framework::Scope &scope, - const platform::Place &dev_place) const override; - - protected: - mutable std::shared_ptr rpc_service_; - mutable std::shared_ptr server_thread_; -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/detail/async_grpc_server.cc b/paddle/fluid/operators/detail/async_grpc_server.cc deleted file mode 100644 index fb0258f942..0000000000 --- a/paddle/fluid/operators/detail/async_grpc_server.cc +++ /dev/null @@ -1,311 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/detail/async_grpc_server.h" - -#include -#include - -using ::grpc::ServerAsyncResponseWriter; - -namespace paddle { -namespace operators { -namespace detail { - -enum CallStatus { PROCESS = 0, FINISH }; - -// reference: -// https://stackoverflow.com/questions/41732884/grpc-multiple-services-in-cpp-async-server -class RequestBase { - public: - explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, - const platform::DeviceContext* dev_ctx) - : service_(service), cq_(cq), status_(PROCESS), dev_ctx_(dev_ctx) { - PADDLE_ENFORCE(cq_); - } - virtual ~RequestBase() {} - virtual void Process() { assert(false); } - - CallStatus Status() { return status_; } - void SetStatus(CallStatus status) { status_ = status; } - virtual std::string GetReqName() { - assert(false); - return ""; - } - - protected: - ::grpc::ServerContext ctx_; - GrpcService::AsyncService* service_; - ::grpc::ServerCompletionQueue* cq_; - CallStatus status_; - const platform::DeviceContext* dev_ctx_; -}; - -class RequestSend final : public RequestBase { - public: - explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, - framework::Scope* scope, ReceivedQueue* queue, - const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, dev_ctx), queue_(queue), responder_(&ctx_) { - request_.reset(new VariableResponse(true, scope, dev_ctx_)); - int method_id = static_cast(detail::GrpcMethod::kSendVariable); - service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, - cq_, cq_, this); - } - - virtual ~RequestSend() {} - - virtual std::string GetReqName() { return request_->Varname(); } - - virtual void Process() { - queue_->Push(std::make_pair(request_->Varname(), request_)); - - sendrecv::VoidMessage reply; - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - } - - protected: - std::shared_ptr request_; - ReceivedQueue* queue_; - ServerAsyncResponseWriter responder_; -}; - -class RequestGet final : public RequestBase { - public: - explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, dev_ctx), responder_(&ctx_), scope_(scope) { - auto method_id = static_cast(detail::GrpcMethod::kGetVariable); - service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, - cq_, this); - } - - virtual ~RequestGet() {} - - virtual std::string GetReqName() { return request_.varname(); } - - virtual void Process() { - // proc request. 
- std::string var_name = request_.varname(); - auto* var = scope_->FindVar(var_name); - - ::grpc::ByteBuffer reply; - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); - - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - } - - protected: - sendrecv::VariableMessage request_; - ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; -}; - -class RequestPrefetch final : public RequestBase { - public: - explicit RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx, - framework::Executor* executor, - framework::ProgramDesc* program, - framework::ExecutorPrepareContext* prefetch_ctx) - : RequestBase(service, cq, dev_ctx), - responder_(&ctx_), - scope_(scope), - executor_(executor), - program_(program), - prefetch_ctx_(prefetch_ctx) { - request_.reset(new VariableResponse(true, scope, dev_ctx_)); - int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); - service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, - cq_, cq_, this); - } - - virtual ~RequestPrefetch() {} - - virtual std::string GetReqName() { return request_->Varname(); } - - virtual void Process() { - // prefetch process... - ::grpc::ByteBuffer reply; - - std::string var_name = request_->OutVarname(); - VLOG(3) << "prefetch var " << var_name; - auto var_desc = program_->Block(0).FindVar(var_name); - framework::Scope* local_scope = &scope_->NewScope(); - auto* var = local_scope->FindVar(var_name); - InitializeVariable(var, var_desc->GetType()); - executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false); - - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); - - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - } - - protected: - std::shared_ptr request_; - ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; - framework::Executor* executor_; - framework::ProgramDesc* program_; - framework::ExecutorPrepareContext* prefetch_ctx_; -}; - -void SyncGRPCServer::RunAsyncUpdate() { - ::grpc::ServerBuilder builder; - builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), - &selected_port_); - builder.SetMaxSendMessageSize(std::numeric_limits::max()); - builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); - builder.RegisterService(&service_); - - cq_send_ = builder.AddCompletionQueue(); - cq_get_ = builder.AddCompletionQueue(); - cq_prefetch_ = builder.AddCompletionQueue(); - - server_ = builder.BuildAndStart(); - LOG(INFO) << "Server listening on " << address_ - << " selected port: " << selected_port_; - - std::function send_register = - std::bind(&SyncGRPCServer::TryToRegisterNewSendOne, this); - std::function get_register = - std::bind(&SyncGRPCServer::TryToRegisterNewGetOne, this); - std::function prefetch_register = - std::bind(&SyncGRPCServer::TryToRegisterNewPrefetchOne, this); - - // TODO(wuyi): Run these "HandleRequest" in thread pool - t_send_.reset( - new std::thread(std::bind(&SyncGRPCServer::HandleRequest, this, - cq_send_.get(), "cq_send", send_register))); - t_get_.reset( - new std::thread(std::bind(&SyncGRPCServer::HandleRequest, this, - cq_get_.get(), "cq_get", get_register))); - t_prefetch_.reset(new std::thread( - std::bind(&SyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), - "cq_prefetch", prefetch_register))); - // wait server - server_->Wait(); - t_send_->join(); - t_get_->join(); - t_prefetch_->join(); -} - -void 
SyncGRPCServer::ShutdownQueue() { - std::unique_lock lock(cq_mutex_); - cq_send_->Shutdown(); - cq_get_->Shutdown(); - cq_prefetch_->Shutdown(); -} - -// This URL explains why shutdown is complicate: -void SyncGRPCServer::ShutDown() { - is_shut_down_ = true; - ShutdownQueue(); - server_->Shutdown(); -} - -void SyncGRPCServer::TryToRegisterNewSendOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; - return; - } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), scope_, - &var_recv_queue_, dev_ctx_); - VLOG(4) << "Create RequestSend status:" << send->Status(); -} - -void SyncGRPCServer::TryToRegisterNewGetOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; - return; - } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), scope_, dev_ctx_); - VLOG(4) << "Create RequestGet status:" << get->Status(); -} - -void SyncGRPCServer::TryToRegisterNewPrefetchOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewPrefetchOne"; - return; - } - RequestPrefetch* prefetch = - new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, - executor_, program_, prefetch_ctx_); - - VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); -} - -void SyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, - const std::string& cq_name, - std::function TryToRegisterNewOne) { - TryToRegisterNewOne(); - - void* tag = NULL; - bool ok = false; - - while (true) { - VLOG(3) << "HandleRequest for " << cq_name << " while in"; - if (!cq->Next(&tag, &ok)) { - LOG(INFO) << cq_name << " CompletionQueue shutdown!"; - break; - } - VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; - - PADDLE_ENFORCE(tag); - - RequestBase* base = reinterpret_cast(tag); - // reference: - // https://github.com/tensorflow/tensorflow/issues/5596 - // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM - // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I - if (!ok) { - LOG(WARNING) << cq_name << " recv no regular event:argument name[" - << base->GetReqName() << "]"; - TryToRegisterNewOne(); - delete base; - continue; - } - - switch (base->Status()) { - case PROCESS: { - VLOG(4) << cq_name << " status:" << base->Status(); - TryToRegisterNewOne(); - base->Process(); - break; - } - case FINISH: { - VLOG(4) << cq_name << " status:" << base->Status(); - delete base; - break; - } - default: { assert(false); } - } - } -} - -} // namespace detail -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/detail/async_grpc_server.h b/paddle/fluid/operators/detail/async_grpc_server.h deleted file mode 100644 index 870b684cfa..0000000000 --- a/paddle/fluid/operators/detail/async_grpc_server.h +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include // NOLINT -#include - -#include "grpc++/grpc++.h" -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/framework/var_type.h" -#include "paddle/fluid/operators/detail/grpc_service.h" -#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" -#include "paddle/fluid/operators/detail/send_recv.pb.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" -#include "paddle/fluid/operators/detail/simple_block_queue.h" - -namespace paddle { -namespace operators { -namespace detail { - -typedef std::pair> - ReceivedMessage; -typedef SimpleBlockQueue ReceivedQueue; - -typedef std::pair MessageWithName; -class RequestBase; - -class SyncGRPCServer final { - public: - explicit SyncGRPCServer(const std::string &address) : address_(address) {} - - void RunAsyncUpdate(); - - void SetScope(framework::Scope *scope) { scope_ = scope; } - - void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } - - void SetProgram(framework::ProgramDesc *program) { program_ = program; } - - void SetExecutor(framework::Executor *executor) { executor_ = executor; } - - void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { - prefetch_ctx_ = prepared; - } - - int GetSelectedPort() { return selected_port_; } - - const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } - - void Push(const std::string &msg_name) { - this->var_recv_queue_.Push(std::make_pair(msg_name, nullptr)); - } - - void ShutDown(); - - protected: - void HandleRequest(::grpc::ServerCompletionQueue *cq, - const std::string &cq_name, - std::function TryToRegisterNewOne); - void TryToRegisterNewSendOne(); - void TryToRegisterNewGetOne(); - void TryToRegisterNewPrefetchOne(); - void ShutdownQueue(); - - private: - std::mutex cq_mutex_; - volatile bool is_shut_down_ = false; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_; - - GrpcService::AsyncService service_; - std::unique_ptr<::grpc::Server> server_; - - std::string address_; - framework::Scope *scope_; - const platform::DeviceContext *dev_ctx_; - - // client send variable to this queue. 
- ReceivedQueue var_recv_queue_; - - std::unique_ptr t_send_; - std::unique_ptr t_get_; - std::unique_ptr t_prefetch_; - - framework::ExecutorPrepareContext *prefetch_ctx_; - framework::ProgramDesc *program_; - framework::Executor *executor_; - int selected_port_; -}; - -}; // namespace detail -}; // namespace operators -}; // namespace paddle From c6937abdd18f3e6edd958db6a0044bbc5eac0fa7 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Sat, 21 Apr 2018 21:51:11 +0800 Subject: [PATCH 12/72] tmp --- paddle/fluid/operators/CMakeLists.txt | 2 -- paddle/fluid/operators/detail/CMakeLists.txt | 2 +- python/paddle/fluid/distribute_transpiler.py | 8 ++++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 2d38663df9..256aded8ca 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -194,8 +194,6 @@ if(WITH_DISTRIBUTE) set_source_files_properties(recv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(listen_and_serv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(async_listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(async_listen_and_serv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_vars_op DEPS ${DISTRIBUTE_DEPS}) set_source_files_properties(send_vars_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) op_library(send_barrier_op DEPS ${DISTRIBUTE_DEPS}) diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt index 059099ee0a..719a7465b8 100644 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ b/paddle/fluid/operators/detail/CMakeLists.txt @@ -1,6 +1,6 @@ if(WITH_DISTRIBUTE) grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc - grpc_server.cc async_grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) + grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 349427525d..73b7c85663 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -143,7 +143,8 @@ class DistributeTranspiler: program=None, pservers="127.0.0.1:6174", trainers=1, - split_method=splitter.round_robin): + split_method=splitter.round_robin, + sync_mode=True): """ Transpile the program to distributed data-parallelism programs. The main_program will be transformed to use a remote parameter server @@ -191,6 +192,7 @@ class DistributeTranspiler: self.origin_program = program self.trainer_num = trainers self.optimize_ops = optimize_ops + self.sync_mode = sync_mode # TODO(typhoonzero): currently trainer_id is fetched from cluster system # like Kubernetes, we should port this to use etcd later when developing # fluid distributed training with fault-tolerance. 
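These transpiler patches wire a grad_to_id attribute through to the server-side listen_and_serv op, encoded as strings of the form "param1@GRAD.block0:1" (gradient name, then the id of the optimize block that updates it). As a minimal illustration only (not part of any patch in this series; the helper name is invented, and the ':' separator matches the parsing the op settles on later), such strings could be decoded like this:

// Sketch: decode "grad_name:block_id" entries into a lookup table, mirroring
// how the server maps an incoming gradient to its optimize block.
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

std::unordered_map<std::string, int> DecodeGradToBlockId(
    const std::vector<std::string>& grad_to_id_strs) {
  std::unordered_map<std::string, int> grad_to_block;
  for (const auto& entry : grad_to_id_strs) {
    auto pos = entry.rfind(':');             // split on the last ':'
    if (pos == std::string::npos) continue;  // skip malformed entries
    grad_to_block[entry.substr(0, pos)] = std::stoi(entry.substr(pos + 1));
  }
  return grad_to_block;
}

int main() {
  auto m = DecodeGradToBlockId({"param1@GRAD.block0:1", "param2@GRAD.blockn:2"});
  for (const auto& kv : m) std::cout << kv.first << " -> " << kv.second << "\n";
  return 0;
}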
@@ -473,7 +475,9 @@ class DistributeTranspiler: "OptimizeBlock": optimize_block, "endpoint": endpoint, "Fanin": self.trainer_num, - "PrefetchBlock": prefetch_block + "PrefetchBlock": prefetch_block, + "sync_mode": self.sync_mode, + "grad_to_id": [] }) pserver_program.sync_with_cpp() From 63055a3e08768b1e376dbca749fa4c0fb42ef0a0 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 23 Apr 2018 11:17:59 +0800 Subject: [PATCH 13/72] complete grad_to_id --- paddle/fluid/operators/listen_and_serv_op.cc | 3 ++- python/paddle/fluid/distribute_transpiler.py | 13 ++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index d5cb165351..a01f0aef8d 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -328,7 +328,8 @@ from send_op and send back variables to recv_op. .SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr>( - "grad_to_id(['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'])", + "grad_to_id", + "['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] " "a map from grad name to it's optimize block id") .SetDefault({}); AddAttr("sync_mode", "if works at sync_mode or not") diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 5f37a790f1..9a514bd9a3 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -408,9 +408,9 @@ class DistributeTranspiler: in_name.startswith("beta2_pow_acc"): global_ops.append(op) - def __append_optimize_op__(op, block): + def __append_optimize_op__(op, block, grad_to_block_id): if self._is_opt_op(op): - self._append_pserver_ops(block, op, endpoint, + self._append_pserver_ops(block, op, endpoint, grad_to_block_id, default_main_program()) else: self._append_pserver_non_opt_ops(block, op) @@ -424,13 +424,14 @@ class DistributeTranspiler: self._append_pserver_non_opt_ops(lr_decay_block, op) # append op to the current block + grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: - __append_optimize_op__(op, per_opt_block) + __append_optimize_op__(op, per_opt_block, grad_to_block_id) # append global ops opt_state_block = None @@ -476,7 +477,7 @@ class DistributeTranspiler: "Fanin": self.trainer_num, "PrefetchBlock": prefetch_block, "sync_mode": self.sync_mode, - "grad_to_id": [] + "grad_to_id": grad_to_block_id }) pserver_program.sync_with_cpp() @@ -883,7 +884,7 @@ class DistributeTranspiler: return orig_var_name def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - origin_program): + grad_to_block_id, origin_program): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() @@ -904,6 +905,8 @@ class DistributeTranspiler: return merged_var = \ pserver_block.vars[self._orig_varname(grad_block.name)] + grad_to_block_id.append(merged_var.name + ":" + str( + optimize_block.idx)) if self.trainer_num > 1: vars2merge = [] for i in xrange(self.trainer_num): From 34f28185497c17a7024b501d4e6e0c276cafbeab Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 23 Apr 2018 13:00:54 +0800 Subject: [PATCH 14/72] distribute transpiler support async config --- 
python/paddle/fluid/distribute_transpiler.py | 43 +++++++++++--------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 9a514bd9a3..6ac3b82671 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -358,7 +358,7 @@ class DistributeTranspiler: type=v.type, dtype=v.dtype, shape=v.shape) - if self.trainer_num > 1: + if self.sync_mode and self.trainer_num > 1: for trainer_id in xrange(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), @@ -688,17 +688,6 @@ class DistributeTranspiler: self.table_name)], persistable=False) - # create grad vars in pserver program - table_grad_var = self.table_param_grad[1] - table_grad_list = [ - pserver_program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, index, pserver_index), - type=table_grad_var.type, - shape=table_grad_var.shape, - dtype=table_grad_var.dtype) for index in range(self.trainer_num) - ] - # create table optimize block in pserver program table_opt_op = [ op for op in self.optimize_ops @@ -708,11 +697,24 @@ class DistributeTranspiler: # only support sgd now assert table_opt_op.type == "sgd" - # append sum op for table_grad_list - table_opt_block.append_op( - type="sum", - inputs={"X": table_grad_list}, - outputs={"Out": [grad_var]}) + if self.sync_mode: + # create grad vars in pserver program + table_grad_var = self.table_param_grad[1] + table_grad_list = [ + pserver_program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, index, pserver_index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(self.trainer_num) + ] + + # append sum op for table_grad_list + table_opt_block.append_op( + type="sum", + inputs={"X": table_grad_list}, + outputs={"Out": [grad_var]}) lr_var = pserver_program.global_block().vars[table_opt_op.input( "LearningRate")[0]] @@ -751,7 +753,7 @@ class DistributeTranspiler: for varname, splited in block_map.iteritems(): orig_var = program.global_block().var(varname) if len(splited) == 1: - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) program.global_block().rename_var(varname, new_var_name) @@ -775,7 +777,7 @@ class DistributeTranspiler: if len(orig_shape) >= 2: splited_shape.extend(orig_shape[1:]) new_var_name = "" - if add_trainer_suffix: + if self.sync_mode and add_trainer_suffix: new_var_name = "%s.block%d.trainer_%d" % \ (varname, i, self.trainer_id) else: @@ -907,7 +909,7 @@ class DistributeTranspiler: pserver_block.vars[self._orig_varname(grad_block.name)] grad_to_block_id.append(merged_var.name + ":" + str( optimize_block.idx)) - if self.trainer_num > 1: + if self.sync_mode and self.trainer_num > 1: vars2merge = [] for i in xrange(self.trainer_num): per_trainer_name = "%s.trainer_%d" % \ @@ -925,6 +927,7 @@ class DistributeTranspiler: inputs={"X": merged_var}, outputs={"Out": merged_var}, attrs={"scale": 1.0 / float(self.trainer_num)}) + new_inputs[key] = merged_var elif key == "Param": # param is already created on global program From a0ced3df82e53ca9acc6db9827da5dfb172a7097 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 23 Apr 2018 18:33:11 +0800 Subject: [PATCH 15/72] async update can run --- paddle/fluid/operators/detail/grpc_server.cc | 12 +++++++----- 
.../fluid/operators/detail/variable_response.h | 2 +- paddle/fluid/operators/listen_and_serv_op.cc | 18 +++++++++--------- paddle/fluid/operators/send_op.cc | 13 +++++++++---- python/paddle/fluid/distribute_transpiler.py | 12 +++++++----- 5 files changed, 33 insertions(+), 24 deletions(-) diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 27ddb67500..60d7cc68fc 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -315,9 +315,11 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, VLOG(3) << "HandleRequest for " << cq_name << " while after Next"; PADDLE_ENFORCE(tag); - // FIXME(typhoonzero): de-couple the barriers with recv_op - if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); - if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + if (sync_mode_) { + // FIXME(typhoonzero): de-couple the barriers with recv_op + if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); + if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); + } RequestBase* base = reinterpret_cast(tag); // reference: @@ -334,13 +336,13 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, switch (base->Status()) { case PROCESS: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " PROCESS status:" << base->Status(); TryToRegisterNewOne(); base->Process(); break; } case FINISH: { - VLOG(4) << cq_name << " status:" << base->Status(); + VLOG(4) << cq_name << " FINISH status:" << base->Status(); delete base; break; } diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 3018a5c4af..59a92f7155 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -61,7 +61,7 @@ class VariableResponse { // other: number of error field. 
int Parse(const ::grpc::ByteBuffer& byte_buffer); - const framework::Scope& GetLocalScope() const { return *local_scope_; } + framework::Scope& GetLocalScope() const { return *local_scope_; } inline std::string Varname() { return meta_.varname(); } inline std::string OutVarname() { return meta_.out_varname(); } diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index a01f0aef8d..bf351a2f52 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -48,13 +48,15 @@ static void split(const std::string &str, char sep, static void AsyncExecuteBlock(framework::Executor *executor, framework::ExecutorPrepareContext *prepared, framework::Scope *scope) { - framework::Async([&executor, &prepared, &scope]() { + std::future future = framework::Async([&executor, &prepared, &scope]() { try { executor->RunPreparedContext(prepared, scope, false, false); } catch (std::exception &e) { LOG(ERROR) << "run sub program error " << e.what(); } }); + // TODO(qiao) maybe we can remove this + future.wait(); } static void ParallelExecuteBlocks( @@ -203,6 +205,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, framework::ProgramDesc *program, framework::Scope *recv_scope, framework::BlockDesc *prefetch_block) const { + VLOG(3) << "RunAsyncLoop in"; // grad name to block id std::unordered_map grad_to_id; std::unordered_map id_to_grad; @@ -210,7 +213,8 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, auto grad_to_id_str = Attr>("grad_to_id"); for (auto &grad_and_id : grad_to_id_str) { std::vector pieces; - split(grad_and_id, ' ', &pieces); + split(grad_and_id, ':', &pieces); + VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; PADDLE_ENFORCE_EQ(pieces.size(), 2); PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); int block_id = std::stoi(pieces[1]); @@ -223,14 +227,9 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, std::vector block_list; for (size_t blkid = 1; blkid < num_blocks; ++blkid) { - if (blkid != static_cast(prefetch_block->ID())) { - block_list.push_back(blkid); - } + block_list.push_back(blkid); } - PADDLE_ENFORCE_EQ(grad_to_id_str.size(), block_list.size(), - "grad num should be equal to optimize block num"); auto optimize_prepared = executor->Prepare(*program, block_list); - std::unordered_map> grad_to_prepared; @@ -238,6 +237,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, grad_to_prepared[id_to_grad[block_list[i]]] = optimize_prepared[i]; } + VLOG(3) << "RunAsyncLoop into while"; bool exit_flag = false; while (!exit_flag) { const detail::ReceivedMessage v = rpc_service_->Get(); @@ -254,7 +254,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, PADDLE_THROW("Can not find server side var"); } AsyncExecuteBlock(executor, grad_to_prepared[recv_var_name].get(), - recv_scope); + &(v.second->GetLocalScope())); // TODO(qiao): explain why if (var->IsType()) { var->GetMutable()->mutable_rows()->clear(); diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index 82ff087d0a..e4386b640a 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -41,6 +41,8 @@ class SendOp : public framework::OperatorBase { std::vector endpoints = Attr>("endpoints"); + bool sync_mode = Attr("sync_mode"); + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); @@ -64,11 +66,13 @@ class SendOp : 
public framework::OperatorBase { } PADDLE_ENFORCE(rpc_client->Wait()); - for (auto& ep : endpoints) { - VLOG(3) << "batch barrier, ep: " << ep; - rpc_client->AsyncSendBatchBarrier(ep); + if (sync_mode) { + for (auto& ep : endpoints) { + VLOG(3) << "batch barrier, ep: " << ep; + rpc_client->AsyncSendBatchBarrier(ep); + } + PADDLE_ENFORCE(rpc_client->Wait()); } - PADDLE_ENFORCE(rpc_client->Wait()); if (outs.size() > 0) { for (size_t i = 0; i < outs.size(); i++) { @@ -112,6 +116,7 @@ This operator will send tensor to recv_op at the parameter server. "Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("sync_mode", "work in sync_mode or not").SetDefault(true); } }; diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 6ac3b82671..3a3a94640a 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -297,8 +297,11 @@ class DistributeTranspiler: inputs={"X": send_inputs}, outputs={"Out": send_outputs, "RPCClient": rpc_client_var}, - attrs={"endpoints": pserver_endpoints, - "epmap": eplist}) + attrs={ + "endpoints": pserver_endpoints, + "epmap": eplist, + "sync_mode": self.sync_mode + }) # step4: Concat the parameters splits together after recv. for varname, splited_var in param_var_mapping.iteritems(): if len(splited_var) <= 1: @@ -404,8 +407,8 @@ class DistributeTranspiler: for op in self.optimize_ops: if op.type == "scale": for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or\ - in_name.startswith("beta2_pow_acc"): + if in_name.startswith("beta1_pow_acc") or \ + in_name.startswith("beta2_pow_acc"): global_ops.append(op) def __append_optimize_op__(op, block, grad_to_block_id): @@ -434,7 +437,6 @@ class DistributeTranspiler: __append_optimize_op__(op, per_opt_block, grad_to_block_id) # append global ops - opt_state_block = None if global_ops: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) From a29e352b80ed6fe11c4f5234db550532e3df09be Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 23 Apr 2018 19:03:44 +0800 Subject: [PATCH 16/72] optimize code --- paddle/fluid/operators/detail/variable_response.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 59a92f7155..8c05e60cec 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -46,7 +46,9 @@ class VariableResponse { } virtual ~VariableResponse() { - if (create_scope_) scope_->DeleteScope(local_scope_); + if (create_scope_) { + scope_->DeleteScope(local_scope_); + } } // return: From 3503c47f9a733390f17aa4d027275291d714b02a Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 23 Apr 2018 22:59:49 +0800 Subject: [PATCH 17/72] listen and serv default sync mode --- paddle/fluid/operators/listen_and_serv_op.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index bf351a2f52..3ee642dedb 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -332,8 +332,7 @@ from send_op and send back variables to recv_op. 
"['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] " "a map from grad name to it's optimize block id") .SetDefault({}); - AddAttr("sync_mode", "if works at sync_mode or not") - .SetDefault(false); + AddAttr("sync_mode", "if works at sync_mode or not").SetDefault(true); AddAttr(kOptimizeBlock, "BlockID to run on server side."); AddAttr(kPrefetchBlock, From 8081e15774a4640cfe6ac66395e4fe296f940a40 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 24 Apr 2018 13:07:54 +0800 Subject: [PATCH 18/72] fix send_recv_op_test --- paddle/fluid/operators/send_recv_op_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 2b440fe2de..72dc1586c6 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,7 +137,7 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); - attrs.insert({"grad_to_id", {}}); + attrs.insert({"grad_to_id", std::vector({""})}); attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); From 1bdea0a8d2fffe282c741712ade39d3604472fb9 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 13:41:39 +0800 Subject: [PATCH 19/72] Add init interface for customize devices. --- paddle/fluid/framework/init.cc | 73 ++++++++++++++++++++++++++++++++++ paddle/fluid/framework/init.h | 4 ++ paddle/fluid/inference/io.cc | 2 + paddle/fluid/inference/io.h | 2 + 4 files changed, 81 insertions(+) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 75c557fa42..3ce37041cb 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -15,19 +15,40 @@ limitations under the License. 
*/ #include #include #include +#include #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/piece.h" namespace paddle { namespace framework { +DEFINE_string(devices, "", "The devices to be used."); +DEFINE_bool(init_p2p, true, "Whether to init p2p."); + std::once_flag gflags_init_flag; std::once_flag p2p_init_flag; +using paddle::platform::DeviceContextPool; + +void Init(int argc, char **argv) { + std::call_once(gflags_init_flag, + [&]() { google::ParseCommandLineFlags(&argc, &argv, true); }); + + // init devices + std::vector devices; + std::string token; + std::istringstream tokenStream(FLAGS_devices); + while (std::getline(tokenStream, token, ',')) { + devices.push_back(std::stoi(token)); + } + InitDevices(FLAGS_init_p2p, devices); +} + void InitGflags(std::vector &argv) { std::call_once(gflags_init_flag, [&]() { int argc = argv.size(); @@ -64,6 +85,30 @@ void InitP2P(int count) { #endif } +void InitP2P(std::vector devices) { +#ifdef PADDLE_WITH_CUDA + std::call_once(p2p_init_flag, [&]() { + int count = devices.size(); + for (int i = 0; i < count; ++i) { + for (int j = 0; j < count; ++j) { + if (devices[i] == devices[j]) continue; + int can_acess = -1; + PADDLE_ENFORCE( + cudaDeviceCanAccessPeer(&can_acess, devices[i], devices[j]), + "Failed to test P2P access."); + if (can_acess != 1) { + LOG(WARNING) << "Cannot enable P2P access from " << devices[i] + << " to " << devices[j]; + } else { + cudaSetDevice(devices[i]); + cudaDeviceEnablePeerAccess(devices[j], 0); + } + } + } + }); +#endif +} + void InitDevices(bool init_p2p) { /*Init all avaiable devices by default */ @@ -91,6 +136,34 @@ void InitDevices(bool init_p2p) { platform::DeviceContextPool::Init(places); } +void InitDevices(bool init_p2p, const std::vector devices) { + std::vector places; + int count = 0; +#ifdef PADDLE_WITH_CUDA + try { + count = platform::GetCUDADeviceCount(); + } catch (const std::exception &exp) { + LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime."; + } +#else + LOG(WARNING) + << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; +#endif + + for (size_t i = 0; i < devices.size(); ++i) { + if (devices[i] >= count) { + LOG(WARNING) << "Invalid devices id."; + continue; + } + places.emplace_back(platform::CUDAPlace(devices[i])); + } + if (init_p2p) { + InitP2P(devices); + } + places.emplace_back(platform::CPUPlace()); + platform::DeviceContextPool::Init(places); +} + void InitGLOG(const std::string &prog_name) { // glog will not hold the ARGV[0] inside. // Use strdup to alloc a new string. diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index fae98a60b5..38604d232c 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -20,11 +20,15 @@ limitations under the License. */ namespace paddle { namespace framework { +void Init(int argc, char **argv); + void InitGflags(std::vector &argv); void InitGLOG(const std::string &prog_name); void InitDevices(bool init_p2p); +void InitDevices(bool init_p2p, const std::vector devices); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 78d2f16746..74068d9dbe 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -28,6 +28,8 @@ namespace inference { // linking the inference shared library. 
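Taken together, the init.cc additions above let a single entry point pick GPUs from a --devices flag and optionally skip peer-to-peer setup. A usage sketch follows (not part of the patch; it assumes the std::vector<std::string> overload that the later patches in this series converge on, and the flag values are only examples):

// Sketch of driving the flag-based initialization from an inference program.
// Header path and namespace are taken from this patch series; flag values are examples.
#include <string>
#include <vector>

#include "paddle/fluid/inference/io.h"

int main() {
  // Equivalent to launching the binary with: --devices=0,1 --init_p2p=false
  std::vector<std::string> flags = {"--devices=0,1", "--init_p2p=false"};
  paddle::inference::Init(flags);  // parse gflags, then set up CUDAPlace 0 and 1 plus a CPUPlace
  // ... load and run the inference program ...
  return 0;
}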
void Init(bool init_p2p) { framework::InitDevices(init_p2p); } +void Init(int argc, char** argv) { framework::Init(argc, argv); } + void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s", filename); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index ba3e45099a..988b8aebbe 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -27,6 +27,8 @@ namespace inference { void Init(bool init_p2p); +void Init(int argc, char** argv); + void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, const std::string& dirname, From 48b7b543213d2f1584efca610d6d50ccb1ee56e0 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 14:52:36 +0800 Subject: [PATCH 20/72] Refine code. --- paddle/fluid/framework/init.cc | 10 ++++------ paddle/fluid/framework/init.h | 2 +- paddle/fluid/inference/io.cc | 6 +----- paddle/fluid/inference/io.h | 4 +--- 4 files changed, 7 insertions(+), 15 deletions(-) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 3ce37041cb..642b892105 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -20,7 +20,6 @@ limitations under the License. */ #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/piece.h" @@ -35,10 +34,8 @@ std::once_flag p2p_init_flag; using paddle::platform::DeviceContextPool; -void Init(int argc, char **argv) { - std::call_once(gflags_init_flag, - [&]() { google::ParseCommandLineFlags(&argc, &argv, true); }); - +void Init(std::vector &argv) { + InitGflags(argv); // init devices std::vector devices; std::string token; @@ -51,6 +48,7 @@ void Init(int argc, char **argv) { void InitGflags(std::vector &argv) { std::call_once(gflags_init_flag, [&]() { + argv.push_back("dummy"); int argc = argv.size(); char **arr = new char *[argv.size()]; std::string line; @@ -151,7 +149,7 @@ void InitDevices(bool init_p2p, const std::vector devices) { #endif for (size_t i = 0; i < devices.size(); ++i) { - if (devices[i] >= count) { + if (devices[i] >= count || devices[i] < 0) { LOG(WARNING) << "Invalid devices id."; continue; } diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index 38604d232c..cf792f18b7 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -20,7 +20,7 @@ limitations under the License. */ namespace paddle { namespace framework { -void Init(int argc, char **argv); +void Init(std::vector &argv); void InitGflags(std::vector &argv); diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 74068d9dbe..9c37e0178a 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -24,11 +24,7 @@ limitations under the License. */ namespace paddle { namespace inference { -// Temporarily add this function for exposing framework::InitDevices() when -// linking the inference shared library. 
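One detail worth noting in the InitGflags change above: command-line parsers treat the first element of argv as the program name, so a placeholder appended at the end leaves the first real flag in that slot and it is skipped; a later patch in this series fixes this by inserting "dummy" at the front instead. A small self-contained illustration (the flag string is only an example):

// The placeholder program name must be argv[0]; whatever sits in that slot is
// not parsed as a flag, which is why push_back("dummy") loses the first real flag.
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> argv = {"--devices=0,1"};
  argv.insert(argv.begin(), "dummy");  // placeholder first, as the later fix does
  for (size_t i = 0; i < argv.size(); ++i) {
    std::cout << "argv[" << i << "] = " << argv[i] << "\n";
  }
  return 0;
}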
-void Init(bool init_p2p) { framework::InitDevices(init_p2p); } - -void Init(int argc, char** argv) { framework::Init(argc, argv); } +void Init(std::vector &argv) { framework::Init(argv); } void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index 988b8aebbe..799693b0c5 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -25,9 +25,7 @@ limitations under the License. */ namespace paddle { namespace inference { -void Init(bool init_p2p); - -void Init(int argc, char** argv); +void Init(std::vector &argv); void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, From a0b258278ed0c0419a1eb3a86cddd8a6a7562409 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 16:42:22 +0800 Subject: [PATCH 21/72] Reuse 'initP2P(bool, std::vector)' in 'initP2P(bool)' --- paddle/fluid/framework/init.cc | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 721364e4bd..c8775ec727 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -108,15 +108,14 @@ void InitP2P(std::vector devices) { } void InitDevices(bool init_p2p) { - /*Init all available devices by default */ - - std::vector places; - places.emplace_back(platform::CPUPlace()); - int count = 0; - +/*Init all available devices by default */ #ifdef PADDLE_WITH_CUDA + std::vector devices; try { - count = platform::GetCUDADeviceCount(); + int count = platform::GetCUDADeviceCount(); + for (int i = 0; i < count; ++i) { + devices.push_back(i); + } } catch (const std::exception &exp) { LOG(WARNING) << "Compiled with WITH_GPU, but no GPU found in runtime."; } @@ -124,14 +123,7 @@ void InitDevices(bool init_p2p) { LOG(WARNING) << "'CUDA' is not supported, Please re-compile with WITH_GPU option"; #endif - - for (int i = 0; i < count; ++i) { - places.emplace_back(platform::CUDAPlace(i)); - } - if (init_p2p) { - InitP2P(count); - } - platform::DeviceContextPool::Init(places); + InitDevices(init_p2p, devices); } void InitDevices(bool init_p2p, const std::vector devices) { From e4708565f4e1fc08d09465dc60f7267ec561d4e3 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 16:50:15 +0800 Subject: [PATCH 22/72] Fix cpplint format. --- paddle/fluid/inference/io.cc | 2 +- paddle/fluid/inference/io.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 9c37e0178a..734b220a1d 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -24,7 +24,7 @@ limitations under the License. */ namespace paddle { namespace inference { -void Init(std::vector &argv) { framework::Init(argv); } +void Init(const std::vector argv) { framework::Init(argv); } void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index 799693b0c5..caf599b1a6 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -25,7 +25,7 @@ limitations under the License. 
*/ namespace paddle { namespace inference { -void Init(std::vector &argv); +void Init(const std::vector argv); void LoadPersistables(framework::Executor* executor, framework::Scope* scope, const framework::ProgramDesc& main_program, From a4b452a2d60733ffc39c8033515a8eb4081883aa Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 18:55:16 +0800 Subject: [PATCH 23/72] Remove initP2P(bool) and init function in framework. --- paddle/fluid/framework/init.cc | 39 ---------------------------------- paddle/fluid/framework/init.h | 2 -- paddle/fluid/inference/io.cc | 16 +++++++++++++- 3 files changed, 15 insertions(+), 42 deletions(-) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index c8775ec727..ee42bc725b 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -15,7 +15,6 @@ limitations under the License. */ #include #include #include -#include #include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" @@ -26,26 +25,9 @@ limitations under the License. */ namespace paddle { namespace framework { -DEFINE_string(devices, "", "The devices to be used."); -DEFINE_bool(init_p2p, true, "Whether to init p2p."); - std::once_flag gflags_init_flag; std::once_flag p2p_init_flag; -using paddle::platform::DeviceContextPool; - -void Init(std::vector argv) { - InitGflags(argv); - // init devices - std::vector devices; - std::string token; - std::istringstream tokenStream(FLAGS_devices); - while (std::getline(tokenStream, token, ',')) { - devices.push_back(std::stoi(token)); - } - InitDevices(FLAGS_init_p2p, devices); -} - void InitGflags(std::vector argv) { std::call_once(gflags_init_flag, [&]() { argv.push_back("dummy"); @@ -62,27 +44,6 @@ void InitGflags(std::vector argv) { }); } -void InitP2P(int count) { -#ifdef PADDLE_WITH_CUDA - std::call_once(p2p_init_flag, [&]() { - for (int i = 0; i < count; ++i) { - for (int j = 0; j < count; ++j) { - if (i == j) continue; - int can_acess = -1; - PADDLE_ENFORCE(cudaDeviceCanAccessPeer(&can_acess, i, j), - "Failed to test P2P access."); - if (can_acess != 1) { - LOG(WARNING) << "Cannot enable P2P access from " << i << " to " << j; - } else { - cudaSetDevice(i); - cudaDeviceEnablePeerAccess(j, 0); - } - } - } - }); -#endif -} - void InitP2P(std::vector devices) { #ifdef PADDLE_WITH_CUDA std::call_once(p2p_init_flag, [&]() { diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/framework/init.h index 05ce3376fe..0e30594672 100644 --- a/paddle/fluid/framework/init.h +++ b/paddle/fluid/framework/init.h @@ -22,8 +22,6 @@ limitations under the License. */ namespace paddle { namespace framework { -void Init(std::vector argv); - void InitGflags(std::vector argv); void InitGLOG(const std::string &prog_name); diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 734b220a1d..5b8dec199d 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -16,15 +16,29 @@ limitations under the License. 
*/ #include #include +#include #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/feed_fetch_type.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/pybind/pybind.h" +DEFINE_string(devices, "", "The devices to be used."); +DEFINE_bool(init_p2p, true, "Whether to init p2p."); + namespace paddle { namespace inference { -void Init(const std::vector argv) { framework::Init(argv); } +void Init(const std::vector argv) { + framework::InitGflags(argv); + // init devices + std::vector devices; + std::string token; + std::istringstream tokenStream(FLAGS_devices); + while (std::getline(tokenStream, token, ',')) { + devices.push_back(std::stoi(token)); + } + framework::InitDevices(FLAGS_init_p2p, devices); +} void ReadBinaryFile(const std::string& filename, std::string* contents) { std::ifstream fin(filename, std::ios::in | std::ios::binary); From 3d96b3811afa66ad0d9559fb91a2b5325f6d61f9 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 24 Apr 2018 21:01:45 +0800 Subject: [PATCH 24/72] Fix InitGflags. --- paddle/fluid/framework/init.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index ee42bc725b..457dc662b1 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -30,7 +30,7 @@ std::once_flag p2p_init_flag; void InitGflags(std::vector argv) { std::call_once(gflags_init_flag, [&]() { - argv.push_back("dummy"); + argv.insert(argv.begin(), "dummy"); int argc = argv.size(); char **arr = new char *[argv.size()]; std::string line; From ad3f6f4ad565eac65ff9781bbe739f4bba86853b Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Wed, 25 Apr 2018 09:40:23 +0800 Subject: [PATCH 25/72] Fix devices 'not undefined' error. --- paddle/fluid/framework/init.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 457dc662b1..85beae775b 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -69,9 +69,9 @@ void InitP2P(std::vector devices) { } void InitDevices(bool init_p2p) { -/*Init all available devices by default */ -#ifdef PADDLE_WITH_CUDA + /*Init all available devices by default */ std::vector devices; +#ifdef PADDLE_WITH_CUDA try { int count = platform::GetCUDADeviceCount(); for (int i = 0; i < count; ++i) { From 2a06e307d0b5de691859d103de217251c5cf21bd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 25 Apr 2018 15:43:02 +0800 Subject: [PATCH 26/72] Fix batch_gemm bugs stride should be int64_t, not int --- paddle/fluid/operators/math/math_function.cc | 10 +++++++--- paddle/fluid/operators/math/math_function.cu | 16 ++++++++++------ paddle/fluid/operators/math/math_function.h | 7 ++++--- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 44fd739fb1..249ab6e903 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/math_function.h" +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function_impl.h" #include "paddle/fluid/platform/float16.h" @@ -161,7 +162,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, - float16* C, const int batchCount, const int strideA, const int strideB) { + float16* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { PADDLE_THROW("float16 batched_gemm not supported on CPU"); } @@ -172,7 +174,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; @@ -194,7 +197,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index 9badf26c9b..2aa819625e 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/math_function_impl.h" @@ -267,7 +268,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float16 alpha, const float16* A, const float16* B, const float16 beta, - float16* C, const int batchCount, const int strideA, const int strideB) { + float16* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -278,7 +280,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; const half h_alpha = static_cast(alpha); const half h_beta = static_cast(beta); @@ -303,7 +305,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -314,7 +317,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasSgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, @@ -329,7 +332,8 @@ void batched_gemm( const platform::CUDADeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { #if CUDA_VERSION >= 8000 // Note that cublas follows fortran order, so the order is different from // the cblas convention. @@ -340,7 +344,7 @@ void batched_gemm( (transA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (transB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; - const int strideC = M * N; + const int64_t strideC = M * N; PADDLE_ENFORCE(platform::dynload::cublasDgemmStridedBatched( context.cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h index cdbc7bfb37..cdd0297472 100644 --- a/paddle/fluid/operators/math/math_function.h +++ b/paddle/fluid/operators/math/math_function.h @@ -26,7 +26,7 @@ limitations under the License. 
*/ #ifndef LAPACK_FOUND extern "C" { -#include +#include // NOLINT int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda, int* ipiv); int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda, @@ -39,6 +39,7 @@ int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, #endif #include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" @@ -78,8 +79,8 @@ template void batched_gemm(const DeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const T alpha, const T* A, const T* B, - const T beta, T* C, const int batchCount, const int strideA, - const int strideB); + const T beta, T* C, const int batchCount, + const int64_t strideA, const int64_t strideB); template void gemv(const DeviceContext& context, const bool trans_a, const int M, From 3d53631bad0c6b949aacac121f89b0b77e716fe0 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 25 Apr 2018 16:29:28 +0800 Subject: [PATCH 27/72] Make dyload strictly use the same ABI in header --- paddle/fluid/platform/dynload/cublas.h | 10 ++++++---- paddle/fluid/platform/dynload/cudnn.h | 2 +- paddle/fluid/platform/dynload/cupti.h | 2 +- paddle/fluid/platform/dynload/curand.h | 2 +- paddle/fluid/platform/dynload/nccl.h | 2 +- paddle/fluid/platform/dynload/warpctc.h | 2 +- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 1ab55d6b9b..81acaff87d 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -14,10 +14,12 @@ #pragma once +#include #include #include #include #include // NOLINT +#include #include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { @@ -37,14 +39,14 @@ extern void *cublas_dso_handle; #ifdef PADDLE_USE_DSO #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \ struct DynLoad__##__name { \ + using FUNC_TYPE = decltype(&::__name); \ template \ inline cublasStatus_t operator()(Args... args) { \ - typedef cublasStatus_t (*cublasFunc)(Args...); \ std::call_once(cublas_dso_flag, []() { \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ }); \ void *p_##__name = dlsym(cublas_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ + return reinterpret_cast(p_##__name)(args...); \ } \ }; \ extern DynLoad__##__name __name @@ -71,8 +73,8 @@ extern void *cublas_dso_handle; __macro(cublasDgemm_v2); \ __macro(cublasHgemm); \ __macro(cublasSgemmEx); \ - __macro(cublasSgeam_v2); \ - __macro(cublasDgeam_v2); \ + __macro(cublasSgeam); \ + __macro(cublasDgeam); \ __macro(cublasCreate_v2); \ __macro(cublasDestroy_v2); \ __macro(cublasSetStream_v2); \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 24475b62ca..34d83e3956 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -34,7 +34,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); struct DynLoad__##__name { \ template \ auto operator()(Args... 
args) -> decltype(__name(args...)) { \ - using cudnn_func = decltype(__name(args...)) (*)(Args...); \ + using cudnn_func = decltype(&::__name); \ std::call_once(cudnn_dso_flag, []() { \ cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index d0d676b9d8..e64de7c20f 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -41,7 +41,7 @@ extern void *cupti_dso_handle; struct DynLoad__##__name { \ template \ inline CUptiResult CUPTIAPI operator()(Args... args) { \ - typedef CUptiResult CUPTIAPI (*cuptiFunc)(Args...); \ + using cuptiFunc = decltype(&::__name); \ std::call_once(cupti_dso_flag, []() { \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 4697fb6cd9..46ad4379d5 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -30,7 +30,7 @@ extern void *curand_dso_handle; struct DynLoad__##__name { \ template \ curandStatus_t operator()(Args... args) { \ - typedef curandStatus_t (*curandFunc)(Args...); \ + using curandFunc = decltype(&::__name); \ std::call_once(curand_dso_flag, []() { \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index c5a10a78a4..37902ae20c 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -33,7 +33,7 @@ extern void* nccl_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... args) -> decltype(__name(args...)) { \ - using nccl_func = decltype(__name(args...)) (*)(Args...); \ + using nccl_func = decltype(&::__name); \ std::call_once(nccl_dso_flag, []() { \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ }); \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index 7fa4683704..7c70649d21 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -36,7 +36,7 @@ extern void* warpctc_dso_handle; struct DynLoad__##__name { \ template \ auto operator()(Args... 
args) -> decltype(__name(args...)) { \ - using warpctcFunc = decltype(__name(args...)) (*)(Args...); \ + using warpctcFunc = decltype(&::__name); \ std::call_once(warpctc_dso_flag, []() { \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ }); \ From 580dad0c2c634f7d7f66c5a9f8032a32fd6ffcbd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 25 Apr 2018 16:32:57 +0800 Subject: [PATCH 28/72] Fix compile when there is no mkl --- paddle/fluid/operators/math/math_function.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 249ab6e903..b5ae41c8f9 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -224,7 +224,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, - float* C, const int batchCount, const int strideA, const int strideB) { + float* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { for (int k = 0; k < batchCount; ++k) { const float* Ak = &A[k * strideA]; const float* Bk = &B[k * strideB]; @@ -239,7 +240,8 @@ void batched_gemm( const platform::CPUDeviceContext& context, const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, - double* C, const int batchCount, const int strideA, const int strideB) { + double* C, const int batchCount, const int64_t strideA, + const int64_t strideB) { for (int k = 0; k < batchCount; ++k) { const double* Ak = &A[k * strideA]; const double* Bk = &B[k * strideB]; From 0c24b3f937f27c43e549ef68cc5f6f3427917c69 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 25 Apr 2018 19:24:32 +0800 Subject: [PATCH 29/72] Clean memcpy async --- .../framework/details/fetch_op_handle.cc | 1 - paddle/fluid/pybind/tensor_py.h | 40 ++++--------------- 2 files changed, 8 insertions(+), 33 deletions(-) diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 423449abff..b57c7dab3a 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -67,7 +67,6 @@ void FetchOpHandle::RunImpl() { if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true); - dev_ctxes_.at(t.place())->Wait(); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 159d1d5f4e..dcd711a33f 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -63,15 +63,9 @@ struct CastToPyBufferImpl { auto *dst_ptr = static_cast(dst_tensor.mutable_data( tensor.dims(), platform::CPUPlace())); - platform::DeviceContextPool &pool = - platform::DeviceContextPool::Instance(); - auto dev_ctx = static_cast( - pool.Get(tensor.place())); - - paddle::platform::GpuMemcpyAsync( - dst_ptr, src_ptr, sizeof(CUR_TYPE) * tensor.numel(), - cudaMemcpyDeviceToHost, dev_ctx->stream()); - dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst_ptr, src_ptr, + sizeof(CUR_TYPE) * tensor.numel(), + cudaMemcpyDeviceToHost); #else PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); #endif @@ -184,17 +178,8 @@ void 
PyCUDATensorSetFromArray( self->Resize(framework::make_ddim(dims)); auto *dst = self->mutable_data(place); - - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto dev_ctx = - static_cast(pool.Get(place)); - paddle::platform::GpuMemcpyAsync(dst, array.data(), sizeof(T) * array.size(), - cudaMemcpyHostToDevice, dev_ctx->stream()); - // NOTE: For safety, here wait the copy complete. - // It because the CPU array.data() could be destroyed after this method. - // If we make this method async, it could be copied data from a memory buffer - // that has been freed. - dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst, array.data(), sizeof(T) * array.size(), + cudaMemcpyHostToDevice); } template <> @@ -214,18 +199,9 @@ void PyCUDATensorSetFromArray( self->Resize(framework::make_ddim(dims)); auto *dst = self->mutable_data(place); - - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto dev_ctx = - static_cast(pool.Get(place)); - paddle::platform::GpuMemcpyAsync(dst, array.data(), - sizeof(uint16_t) * array.size(), - cudaMemcpyHostToDevice, dev_ctx->stream()); - // NOTE: For safety, here wait the copy complete. - // It because the CPU array.data() could be destroyed after this method. - // If we make this method async, it could be copied data from a memory buffer - // that has been freed. - dev_ctx->Wait(); + paddle::platform::GpuMemcpySync(dst, array.data(), + sizeof(uint16_t) * array.size(), + cudaMemcpyHostToDevice); } template From 1a25f3cd07dd52e50e52d6071fa5aa32b531db12 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 25 Apr 2018 19:32:08 +0800 Subject: [PATCH 30/72] Add reader blocking queue --- paddle/fluid/operators/reader/CMakeLists.txt | 2 + .../fluid/operators/reader/blocking_queue.h | 117 +++++++++ .../reader/reader_blocking_queue_test.cc | 224 ++++++++++++++++++ 3 files changed, 343 insertions(+) create mode 100644 paddle/fluid/operators/reader/blocking_queue.h create mode 100644 paddle/fluid/operators/reader/reader_blocking_queue_test.cc diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 845528860f..3106978eb0 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -23,5 +23,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) + +cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h new file mode 100644 index 0000000000..4a54ccd5e8 --- /dev/null +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -0,0 +1,117 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include // NOLINT +#include +#include + +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace reader { + +template +class BlockingQueue { + public: + explicit BlockingQueue(size_t capacity) + : capacity_(capacity), closed_(false) { + PADDLE_ENFORCE_GT( + capacity_, 0, + "The capacity of a reader::BlockingQueue must be greater than 0."); + } + + bool Send(const T& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + return false; + } else { + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.push_back(elem); + receive_cv_.notify_one(); + return true; + } + } + + bool Send(T&& elem) { + std::unique_lock lock(mutex_); + send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + return false; + } else { + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.emplace_back(std::move(elem)); + receive_cv_.notify_one(); + return true; + } + } + + bool Receive(T* elem) { + std::unique_lock lock(mutex_); + receive_cv_.wait(lock, [&] { return !queue_.empty() || closed_; }); + if (!queue_.empty()) { + PADDLE_ENFORCE_NOT_NULL(elem); + *elem = queue_.front(); + queue_.pop_front(); + send_cv_.notify_one(); + return true; + } else { + PADDLE_ENFORCE(closed_); + return false; + } + } + + void Close() { + std::lock_guard lock(mutex_); + closed_ = true; + send_cv_.notify_all(); + receive_cv_.notify_all(); + } + + bool IsClosed() { + std::lock_guard lock(mutex_); + return closed_; + } + + bool CanSend() { + std::lock_guard lock(mutex_); + return !closed_ && queue_.size() < capacity_; + } + + bool CanReceive() { + std::lock_guard lock(mutex_); + return !queue_.empty(); + } + + size_t Cap() { + std::lock_guard lock(mutex_); + return capacity_; + } + + private: + size_t capacity_; + bool closed_; + std::deque queue_; + + std::mutex mutex_; + std::condition_variable receive_cv_; + std::condition_variable send_cv_; +}; +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc new file mode 100644 index 0000000000..07c22fe2bf --- /dev/null +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -0,0 +1,224 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
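For readers unfamiliar with the queue's contract: Send() blocks while the queue is at capacity and returns false only after Close() has been called, while Receive() keeps draining buffered elements after Close() and returns false once the queue is both closed and empty. A minimal usage sketch, assuming only the interface declared in blocking_queue.h above (the helper function name is illustrative, not part of the patch):

#include <thread>  // NOLINT
#include <vector>

#include "paddle/fluid/operators/reader/blocking_queue.h"

// Producer sends five integers and closes the queue; the consumer drains it.
void BlockingQueueUsageSketch() {
  paddle::operators::reader::BlockingQueue<int> queue(2);  // capacity of 2
  std::thread producer([&queue] {
    for (int i = 0; i < 5; ++i) {
      if (!queue.Send(i)) {
        break;  // Send() fails only once the queue has been closed.
      }
    }
    queue.Close();
  });
  std::vector<int> received;
  int elem = 0;
  // Receive() still returns buffered elements after Close(); it returns
  // false only when the queue is both closed and empty.
  while (queue.Receive(&elem)) {
    received.push_back(elem);
  }
  producer.join();
  // received now holds 0, 1, 2, 3, 4 in FIFO order.
}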
+ +#include // NOLINT +#include +#include // NOLINT +#include +#include "gtest/gtest.h" + +#include "paddle/fluid/operators/reader/blocking_queue.h" + +using paddle::operators::reader::BlockingQueue; + +TEST(BlockingQueue, CapacityTest) { + size_t cap = 10; + BlockingQueue q(cap); + EXPECT_EQ(q.Cap(), cap); +} + +TEST(BlockingQueue, CanSendTest) { + size_t cap = 1; + BlockingQueue q(cap); + q.Send(1); + EXPECT_FALSE(q.CanSend()); +} + +void FirstInFirstOut(size_t queue_cap, size_t elem_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + std::thread sender([&]() { + for (size_t i = 0; i < elem_num; ++i) { + std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(i)); + } + q.Close(); + }); + size_t count = 0; + while (true) { + std::this_thread::sleep_for(std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + EXPECT_EQ(elem, count++); + } + sender.join(); + EXPECT_EQ(count, elem_num); + EXPECT_TRUE(q.IsClosed()); +} + +TEST(BlockingQueue, FirstInFirstOutTest) { + FirstInFirstOut(2, 5, 2, 50); + FirstInFirstOut(2, 5, 50, 2); + FirstInFirstOut(10, 3, 50, 2); + FirstInFirstOut(10, 3, 2, 50); +} + +TEST(BlockingQueue, SenderBlockingTest) { + const size_t queue_cap = 2; + BlockingQueue q(queue_cap); + size_t send_count = 0; + std::thread sender([&]() { + for (size_t i = 0; i < 5; ++i) { + if (!q.Send(i)) { + break; + } + ++send_count; + } + }); + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + q.Close(); + sender.join(); + EXPECT_EQ(send_count, queue_cap); + std::vector res; + while (q.CanReceive()) { + size_t elem; + EXPECT_TRUE(q.Receive(&elem)); + res.push_back(elem); + } + EXPECT_EQ(res.size(), queue_cap); + for (size_t i = 0; i < res.size(); ++i) { + EXPECT_EQ(res[i], i); + } +} + +TEST(BlockingQueue, ReceiverBlockingTest) { + const size_t queue_cap = 5; + BlockingQueue q(queue_cap); + std::vector receive_res; + std::thread receiver([&]() { + size_t elem; + while (true) { + if (!q.Receive(&elem)) { + break; + } + receive_res.push_back(elem); + } + }); + std::vector to_send{2, 1, 7}; + for (auto e : to_send) { + q.Send(e); + } + q.Close(); + receiver.join(); + EXPECT_EQ(receive_res.size(), to_send.size()); + for (size_t i = 0; i < to_send.size(); ++i) { + EXPECT_EQ(receive_res[i], to_send[i]); + } +} + +void CheckIsUnorderedSame(const std::vector>& v1, + const std::vector>& v2) { + std::set s1; + std::set s2; + for (auto vec : v1) { + for (size_t elem : vec) { + s1.insert(elem); + } + } + for (auto vec : v2) { + for (size_t elem : vec) { + s2.insert(elem); + } + } + EXPECT_EQ(s1.size(), s2.size()); + auto it1 = s1.begin(); + auto it2 = s2.begin(); + while (it1 != s1.end()) { + EXPECT_EQ(*it1, *it2); + ++it1; + ++it2; + } +} + +void MultiSenderMultiReceiver(const size_t queue_cap, + const std::vector>& to_send, + size_t receiver_num, size_t send_time_gap, + size_t receive_time_gap) { + BlockingQueue q(queue_cap); + size_t sender_num = to_send.size(); + std::vector senders; + for (size_t s_idx = 0; s_idx < sender_num; ++s_idx) { + senders.emplace_back(std::thread([&, s_idx] { + for (size_t elem : to_send[s_idx]) { + std::this_thread::sleep_for(std::chrono::milliseconds(send_time_gap)); + EXPECT_TRUE(q.Send(elem)); + } + })); + } + std::vector receivers; + std::mutex mu; + std::vector> res; + for (size_t r_idx = 0; r_idx < receiver_num; ++r_idx) { + receivers.emplace_back(std::thread([&] { + std::vector receiver_res; + while (true) { + 
std::this_thread::sleep_for( + std::chrono::milliseconds(receive_time_gap)); + size_t elem; + if (!q.Receive(&elem)) { + break; + } + receiver_res.push_back(elem); + } + std::lock_guard lock(mu); + res.push_back(receiver_res); + })); + } + for (auto& t : senders) { + t.join(); + } + q.Close(); + for (auto& t : receivers) { + t.join(); + } + CheckIsUnorderedSame(to_send, res); +} + +TEST(BlockingQueue, MultiSenderMultiReaderTest) { + std::vector> to_send_1{{2, 3, 4}, {9}, {0, 7, 15, 6}}; + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(10, to_send_1, 2, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 20, 0, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 50, 0); + MultiSenderMultiReceiver(2, to_send_1, 2, 0, 50); + + std::vector> to_send_2{ + {2, 3, 4}, {}, {0, 7, 15, 6, 9, 32}}; + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(20, to_send_2, 3, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 30, 0, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 50, 0); + MultiSenderMultiReceiver(2, to_send_2, 3, 0, 50); +} + +struct MyClass { + MyClass() : val_(0) {} + explicit MyClass(int val) : val_(val) {} + MyClass(const MyClass& b) { val_ = b.val_; } + MyClass(MyClass&& b) { val_ = b.val_; } + void operator=(const MyClass& b) { val_ = b.val_; } + + int val_; +}; + +TEST(BlockingQueue, MyClassTest) { + BlockingQueue q(2); + MyClass a(200); + q.Send(std::move(a)); + MyClass b; + q.Receive(&b); + EXPECT_EQ(a.val_, b.val_); +} From a7866113146229514870f51c5c7d2c6001eada60 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 25 Apr 2018 19:41:52 +0800 Subject: [PATCH 31/72] Replace Channel in MultiFileReader with BlockingQueue --- .../fluid/operators/reader/open_files_op.cc | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 779dc8a6a0..461b56c7a4 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -48,9 +48,9 @@ class MultiFileReader : public framework::ReaderBase { std::thread scheduler_; std::vector prefetchers_; size_t buffer_size_; - framework::Channel* waiting_file_idx_; - framework::Channel* available_thread_idx_; - framework::Channel>* buffer_; + reader::BlockingQueue* waiting_file_idx_; + reader::BlockingQueue* available_thread_idx_; + reader::BlockingQueue>* buffer_; }; void MultiFileReader::ReadNext(std::vector* out) { @@ -73,17 +73,17 @@ bool MultiFileReader::HasNext() { void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); - waiting_file_idx_ = framework::MakeChannel(file_names_.size()); - available_thread_idx_ = framework::MakeChannel(thread_num); - buffer_ = - framework::MakeChannel>(buffer_size_); + waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); + available_thread_idx_ = new reader::BlockingQueue(thread_num); + buffer_ = new reader::BlockingQueue>( + buffer_size_); for (size_t i = 0; i < file_names_.size(); ++i) { - waiting_file_idx_->Send(&i); + waiting_file_idx_->Send(i); } waiting_file_idx_->Close(); for (size_t i = 0; i < thread_num; ++i) { - available_thread_idx_->Send(&i); + available_thread_idx_->Send(i); } scheduler_ = std::thread([this] { 
ScheduleThreadFunc(); }); @@ -149,7 +149,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, break; } try { - buffer_->Send(&ins); + buffer_->Send(std::move(ins)); } catch (paddle::platform::EnforceNotMet e) { VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " "thread of file '" @@ -158,9 +158,7 @@ void MultiFileReader::PrefetchThreadFunc(std::string file_name, } } - try { - available_thread_idx_->Send(&thread_idx); - } catch (paddle::platform::EnforceNotMet e) { + if (!available_thread_idx_->Send(thread_idx)) { VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " "Fail to send thread_idx."; } From e2ca42408bc1580ffecbdb0b922f67eddd7aad94 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 25 Apr 2018 19:54:46 +0800 Subject: [PATCH 32/72] Replace Channel in DoubleBufferReader with BlockingQueue --- .../reader/create_double_buffer_reader_op.cc | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 4372f23fc1..504e069b65 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -14,7 +14,7 @@ #include // NOLINT -#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { @@ -23,13 +23,13 @@ namespace reader { // 'Double buffer' means we shall maintain two batches of input data at the same // time. So the kCacheSize shoul be at least 2. -static constexpr size_t kCacheSize = 2; +static constexpr size_t kCacheSize = 3; // There will be two bacthes out of the channel during training: // 1. the one waiting to be sent to the channel // 2. the one just be received from the channel, which is also being used by // subsequent operators. // So the channel size should be kChacheSize - 2 -static constexpr size_t kChannelSize = 0; // kCacheSize - 2 +static constexpr size_t kChannelSize = 1; // kCacheSize - 2 class DoubleBufferReader : public framework::DecoratedReader { public: @@ -58,7 +58,7 @@ class DoubleBufferReader : public framework::DecoratedReader { bool HasNext() const; void StartPrefetcher() { - channel_ = framework::MakeChannel(kChannelSize); + channel_ = new reader::BlockingQueue(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); } @@ -74,7 +74,7 @@ class DoubleBufferReader : public framework::DecoratedReader { void PrefetchThreadFunc(); std::thread prefetcher_; - framework::Channel* channel_; + reader::BlockingQueue* channel_; platform::Place place_; std::vector> cpu_tensor_cache_; std::vector> gpu_tensor_cache_; @@ -185,10 +185,7 @@ void DoubleBufferReader::PrefetchThreadFunc() { gpu_batch[i].set_lod(cpu_batch[i].lod()); } } - try { - size_t tmp = cached_tensor_id; - channel_->Send(&tmp); - } catch (paddle::platform::EnforceNotMet e) { + if (!channel_->Send(cached_tensor_id)) { VLOG(5) << "WARNING: The double buffer channel has been closed. 
The " "prefetch thread will terminate."; break; From 2d57158e2ba24e53e98f3df5da44d48aa5d82878 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Wed, 25 Apr 2018 20:29:25 +0800 Subject: [PATCH 33/72] fea/init tensorrt engine (#10003) --- paddle/fluid/inference/engine.h | 53 +++++++ .../fluid/inference/tensorrt/CMakeLists.txt | 5 +- paddle/fluid/inference/tensorrt/engine.cc | 134 ++++++++++++++++ paddle/fluid/inference/tensorrt/engine.h | 144 ++++++++++++++++++ paddle/fluid/inference/tensorrt/helper.h | 88 +++++++++++ .../fluid/inference/tensorrt/test_engine.cc | 83 ++++++++++ .../fluid/inference/tensorrt/test_tensorrt.cc | 18 +-- 7 files changed, 515 insertions(+), 10 deletions(-) create mode 100644 paddle/fluid/inference/engine.h create mode 100644 paddle/fluid/inference/tensorrt/engine.cc create mode 100644 paddle/fluid/inference/tensorrt/engine.h create mode 100644 paddle/fluid/inference/tensorrt/helper.h create mode 100644 paddle/fluid/inference/tensorrt/test_engine.cc diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h new file mode 100644 index 0000000000..0633c052e4 --- /dev/null +++ b/paddle/fluid/inference/engine.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/framework.pb.h" + +namespace paddle { +namespace inference { + +/* + * EngineBase is the base class of all inference engines. An inference engine + * takes a paddle program as input, and outputs the result in fluid Tensor + * format. It can be used to optimize performance of computation sub-blocks, for + * example, break down the original block into sub-blocks and execute each + * sub-blocks in different engines. + * + * For example: + * When inference, the resnet50 model can put most of the model into subgraph + * and run it on a TensorRT engine. + * + * There are several engines such as TensorRT and other frameworks, so an + * EngineBase is put forward to give an unified interface for all the + * different engine implemention. + */ +class EngineBase { + public: + using DescType = ::paddle::framework::proto::BlockDesc; + + // Build the model and do some preparation, for example, in TensorRT, run + // createInferBuilder, buildCudaEngine. + virtual void Build(const DescType& paddle_model) = 0; + + // Execute the engine, that will run the inference network. 
+ virtual void Execute(int batch_size) = 0; + + virtual ~EngineBase() {} + +}; // class EngineBase + +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt index e39c0daac7..4b5866ad5d 100644 --- a/paddle/fluid/inference/tensorrt/CMakeLists.txt +++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt @@ -1 +1,4 @@ -nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) +if(WITH_TESTING) + nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader) + nv_test(test_tensorrt_engine SRCS test_engine.cc engine.cc DEPS dynload_cuda) +endif() diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc new file mode 100644 index 0000000000..276502e499 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -0,0 +1,134 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/tensorrt/engine.h" + +#include +#include +#include +#include "paddle/fluid/inference/tensorrt/helper.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +void TensorRTEngine::Build(const DescType& paddle_model) { + PADDLE_ENFORCE(false, "not implemented"); +} + +void TensorRTEngine::Execute(int batch_size) { + infer_context_->enqueue(batch_size, buffers_.data(), *stream_, nullptr); + cudaStreamSynchronize(*stream_); +} + +TensorRTEngine::~TensorRTEngine() { + // clean buffer + for (auto& buffer : buffers_) { + if (buffer != nullptr) { + PADDLE_ENFORCE_EQ(0, cudaFree(buffer)); + buffer = nullptr; + } + } +} + +void TensorRTEngine::FreezeNetwork() { + PADDLE_ENFORCE(infer_builder_ != nullptr, + "Call InitNetwork first to initialize network."); + PADDLE_ENFORCE(infer_network_ != nullptr, + "Call InitNetwork first to initialize network."); + // build engine. + infer_builder_->setMaxBatchSize(max_batch_); + infer_builder_->setMaxWorkspaceSize(max_workspace_); + + infer_engine_.reset(infer_builder_->buildCudaEngine(*infer_network_)); + PADDLE_ENFORCE(infer_engine_ != nullptr, "build cuda engine failed!"); + + infer_context_.reset(infer_engine_->createExecutionContext()); + + // allocate GPU buffers. 
+ buffers_.resize(buffer_sizes_.size(), nullptr); + for (auto& item : buffer_sizes_) { + if (item.second == 0) { + auto slot_offset = infer_engine_->getBindingIndex(item.first.c_str()); + item.second = kDataTypeSize[static_cast( + infer_engine_->getBindingDataType(slot_offset))] * + AccumDims(infer_engine_->getBindingDimensions(slot_offset)); + } + PADDLE_ENFORCE_EQ(0, cudaMalloc(&buffer(item.first), item.second)); + } +} + +nvinfer1::ITensor* TensorRTEngine::DeclareInput(const std::string& name, + nvinfer1::DataType dtype, + const nvinfer1::Dims& dim) { + PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate input name %s", + name); + + PADDLE_ENFORCE(infer_network_ != nullptr, "should initnetwork first"); + auto* input = infer_network_->addInput(name.c_str(), dtype, dim); + PADDLE_ENFORCE(input, "infer network add input %s failed", name); + + buffer_sizes_[name] = kDataTypeSize[static_cast(dtype)] * AccumDims(dim); + return input; +} + +void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer* layer, int offset, + const std::string& name) { + PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s", + name); + + auto* output = layer->getOutput(offset); + PADDLE_ENFORCE(output != nullptr); + output->setName(name.c_str()); + infer_network_->markOutput(*output); + // output buffers' size can only be decided latter, set zero here to mark this + // and will reset latter. + buffer_sizes_[name] = 0; +} + +void* TensorRTEngine::GetOutputInGPU(const std::string& name) { + return buffer(name); +} + +void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst, + size_t max_size) { + // determine data size + auto it = buffer_sizes_.find(name); + PADDLE_ENFORCE(it != buffer_sizes_.end()); + PADDLE_ENFORCE_GT(it->second, 0); + PADDLE_ENFORCE_GE(max_size, it->second); + + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buffer(name), it->second, + cudaMemcpyDeviceToHost, *stream_)); +} + +void*& TensorRTEngine::buffer(const std::string& name) { + PADDLE_ENFORCE(infer_engine_ != nullptr, "call FreezeNetwork first."); + auto it = buffer_sizes_.find(name); + PADDLE_ENFORCE(it != buffer_sizes_.end()); + auto slot_offset = infer_engine_->getBindingIndex(name.c_str()); + return buffers_[slot_offset]; +} + +void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data, + size_t size) { + void* buf = buffer(name); + PADDLE_ENFORCE_EQ( + 0, cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_)); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h new file mode 100644 index 0000000000..ff853455b8 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -0,0 +1,144 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include +#include "paddle/fluid/inference/engine.h" +#include "paddle/fluid/inference/tensorrt/helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +/* + * TensorRT Engine. + * + * There are two alternative ways to use it, one is to build from a paddle + * protobuf model, another way is to manully construct the network. + */ +class TensorRTEngine : public EngineBase { + public: + // Weight is model parameter. + class Weight { + public: + Weight(nvinfer1::DataType dtype, void* value, int num_elem) { + w_.type = dtype; + w_.values = value; + w_.count = num_elem; + } + const nvinfer1::Weights& get() { return w_; } + + private: + nvinfer1::Weights w_; + }; + + TensorRTEngine(int max_batch, int max_workspace, cudaStream_t* stream, + nvinfer1::ILogger& logger = NaiveLogger::Global()) + : max_batch_(max_batch), + max_workspace_(max_workspace), + stream_(stream), + logger_(logger) {} + + virtual ~TensorRTEngine(); + + // TODO(Superjomn) implement it later when graph segmentation is supported. + virtual void Build(const DescType& paddle_model) override; + + virtual void Execute(int batch_size) override; + + // Initialize the inference network, so that TensorRT layers can add to this + // network. + void InitNetwork() { + infer_builder_.reset(createInferBuilder(logger_)); + infer_network_.reset(infer_builder_->createNetwork()); + } + // After finishing adding ops, freeze this network and creates the executation + // environment. + void FreezeNetwork(); + + // Add an input and set its name, data type and dimention. + nvinfer1::ITensor* DeclareInput(const std::string& name, + nvinfer1::DataType dtype, + const nvinfer1::Dims& dim); + // Set the offset-th output from a layer as the network's output, and set its + // name. + void DeclareOutput(const nvinfer1::ILayer* layer, int offset, + const std::string& name); + + // GPU memory address for an ITensor with specific name. One can operate on + // these memory directly for acceleration, for example, output the converted + // data directly to the buffer to save data copy overhead. + // NOTE this should be used after calling `FreezeNetwork`. + void*& buffer(const std::string& name); + + // Fill an input from CPU memory with name and size. + void SetInputFromCPU(const std::string& name, void* data, size_t size); + // TODO(Superjomn) is this method necessary given that buffer(xxx) can be + // accessed directly. Fill an input from GPU memory with name and size. + void SetInputFromGPU(const std::string& name, void* data, size_t size); + // Get an output called name, the output of tensorrt is in GPU, so this method + // will just return the output's GPU memory address. + void* GetOutputInGPU(const std::string& name); + // LOW EFFICENCY! Get output to CPU, this will trigger a memory copy from GPU + // to CPU. + void GetOutputInCPU(const std::string& name, void* dst, size_t max_size); + + nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); } + nvinfer1::INetworkDefinition* network() { return infer_network_.get(); } + + private: + // the max batch size + int max_batch_; + // the max memory size the engine uses + int max_workspace_; + cudaStream_t* stream_; + nvinfer1::ILogger& logger_; + + std::vector buffers_; + // max data size for the buffers. 
+ std::unordered_map buffer_sizes_; + + // TensorRT related internal members + template + struct Destroyer { + void operator()(T* x) { x->destroy(); } + }; + template + using infer_ptr = std::unique_ptr>; + infer_ptr infer_builder_; + infer_ptr infer_network_; + infer_ptr infer_engine_; + infer_ptr infer_context_; +}; // class TensorRTEngine + +// Add an layer__ into engine__ with args ARGS. +// For example: +// TRT_ENGINE_ADD_LAYER(xxx, FullyConnected, input, dim, weights, bias) +// +// Reference +// https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#charRNN_define_network +// +// will add a fully connected layer into the engine. +// TensorRT has too many layers, so that is not wise to add member functions for +// them, and an macro like this is more extensible when underlying TensorRT +// library add new layer supports. +#define TRT_ENGINE_ADD_LAYER(engine__, layer__, ARGS...) \ + engine__->network()->add##layer__(ARGS); + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/helper.h b/paddle/fluid/inference/tensorrt/helper.h new file mode 100644 index 0000000000..796283d325 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/helper.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#pragma once + +#include +#include +#include +#include "paddle/fluid/platform/dynload/tensorrt.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +namespace dy = paddle::platform::dynload; + +static size_t AccumDims(nvinfer1::Dims dims) { + size_t num = dims.nbDims == 0 ? 0 : 1; + for (int i = 0; i < dims.nbDims; i++) { + PADDLE_ENFORCE_GT(dims.d[i], 0); + num *= dims.d[i]; + } + return num; +} + +// TensorRT data type to size +const int kDataTypeSize[] = { + 4, // kFLOAT + 2, // kHALF + 1, // kINT8 + 4 // kINT32 +}; + +// The following two API are implemented in TensorRT's header file, cannot load +// from the dynamic library. So create our own implementation and directly +// trigger the method from the dynamic library. +static nvinfer1::IBuilder* createInferBuilder(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferBuilder_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} +static nvinfer1::IRuntime* createInferRuntime(nvinfer1::ILogger& logger) { + return static_cast( + dy::createInferRuntime_INTERNAL(&logger, NV_TENSORRT_VERSION)); +} + +// A logger for create TensorRT infer builder. 
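As a side note on how the two helpers above are used together: the engine computes each binding's buffer size as the per-element size of its data type times the product of its dimensions, both in DeclareInput() and, lazily for outputs, in FreezeNetwork(). A minimal sketch of that calculation, using only AccumDims and kDataTypeSize as declared above (the function name is illustrative, not part of the patch):

// Byte size of a binding, given its TensorRT data type and dimensions.
// This mirrors the size computation in TensorRTEngine::DeclareInput() and
// TensorRTEngine::FreezeNetwork().
inline size_t BindingByteSize(nvinfer1::DataType dtype,
                              const nvinfer1::Dims& dims) {
  return kDataTypeSize[static_cast<int>(dtype)] * AccumDims(dims);
}

// For example, a kFLOAT binding of shape CHW = {1, 1, 1} occupies
// 4 * 1 = 4 bytes, matching the single-float input used in test_engine.cc.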
+class NaiveLogger : public nvinfer1::ILogger { + public: + void log(nvinfer1::ILogger::Severity severity, const char* msg) override { + switch (severity) { + case Severity::kINFO: + LOG(INFO) << msg; + break; + case Severity::kWARNING: + LOG(WARNING) << msg; + break; + case Severity::kINTERNAL_ERROR: + case Severity::kERROR: + LOG(ERROR) << msg; + break; + default: + break; + } + } + + static nvinfer1::ILogger& Global() { + static nvinfer1::ILogger* x = new NaiveLogger; + return *x; + } + + virtual ~NaiveLogger() override {} +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc new file mode 100644 index 0000000000..f3dbdf11f2 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -0,0 +1,83 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/tensorrt/engine.h" + +#include +#include +#include +#include + +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +class TensorRTEngineTest : public ::testing::Test { + protected: + void SetUp() override { + ASSERT_EQ(0, cudaStreamCreate(&stream_)); + engine_ = new TensorRTEngine(1, 1 << 10, &stream_); + engine_->InitNetwork(); + } + + void TearDown() override { + delete engine_; + cudaStreamDestroy(stream_); + } + + protected: + TensorRTEngine* engine_; + cudaStream_t stream_; +}; + +TEST_F(TensorRTEngineTest, add_layer) { + const int size = 1; + + float raw_weight[size] = {2.}; // Weight in CPU memory. + float raw_bias[size] = {3.}; + + LOG(INFO) << "create weights"; + TensorRTEngine::Weight weight(nvinfer1::DataType::kFLOAT, raw_weight, size); + TensorRTEngine::Weight bias(nvinfer1::DataType::kFLOAT, raw_bias, size); + auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT, + nvinfer1::DimsCHW{1, 1, 1}); + auto* fc_layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *x, size, + weight.get(), bias.get()); + PADDLE_ENFORCE(fc_layer != nullptr); + + engine_->DeclareOutput(fc_layer, 0, "y"); + LOG(INFO) << "freeze network"; + engine_->FreezeNetwork(); + ASSERT_EQ(engine_->engine()->getNbBindings(), 2); + + // fill in real data + float x_v = 1234; + engine_->SetInputFromCPU("x", (void*)&x_v, 1 * sizeof(float)); + LOG(INFO) << "to execute"; + engine_->Execute(1); + + LOG(INFO) << "to get output"; + // void* y_v = + float y_cpu; + engine_->GetOutputInCPU("y", &y_cpu, sizeof(float)); + + LOG(INFO) << "to checkout output"; + ASSERT_EQ(y_cpu, x_v * 2 + 3); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/test_tensorrt.cc b/paddle/fluid/inference/tensorrt/test_tensorrt.cc index a81a708e7a..aed5b5e1a2 100644 --- a/paddle/fluid/inference/tensorrt/test_tensorrt.cc +++ b/paddle/fluid/inference/tensorrt/test_tensorrt.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. 
All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #include #include From 3cd99f4b6a7297dd6e7c417b822cc611629013f5 Mon Sep 17 00:00:00 2001 From: Missy <514816977@qq.com> Date: Wed, 25 Apr 2018 20:45:28 +0800 Subject: [PATCH 34/72] update multi_cluster/index_en.rst (#9790) * Update doc/v2/howto/cluster/multi_cluster/index_en.rst * update --- .../howto/cluster/multi_cluster/index_en.rst | 36 +++++++++++++------ 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/doc/v2/howto/cluster/multi_cluster/index_en.rst b/doc/v2/howto/cluster/multi_cluster/index_en.rst index dac7aaef08..b69bd5b2db 100644 --- a/doc/v2/howto/cluster/multi_cluster/index_en.rst +++ b/doc/v2/howto/cluster/multi_cluster/index_en.rst @@ -1,19 +1,35 @@ Use different clusters ====================== -PaddlePaddle supports running jobs on several platforms including: -- `Kubernetes `_ open-source system for automating deployment, scaling, and management of containerized applications from Google. -- `OpenMPI `_ Mature high performance parallel computing framework. -- `Fabric `_ A cluster management tool. Write scripts to submit jobs or manage the cluster. +The user's cluster environment is not the same. To facilitate everyone's deployment, we provide a variety of cluster deployment methods to facilitate the submission of cluster training tasks, which will be introduced as follows: -We'll introduce cluster job management on these platforms. The examples can be found under `cluster_train_v2 `_ . +`Kubernetes `_ is a scheduling framework of Google open source container cluster, supporting a complete cluster solution for large-scale cluster production environment. The following guidelines show PaddlePaddle's support for Kubernetes: -These cluster platforms provide API or environment variables for training processes, when the job is dispatched to different nodes. Like node ID, IP or total number of nodes etc. +.. toctree:: + :maxdepth: 1 + + k8s_cn.md + k8s_distributed_cn.md + +`OpenMPI `_ is a mature high-performance parallel computing framework, which is widely used in the field of HPC. The following guide describes how to use OpenMPI to build PaddlePaddle's cluster training task: .. toctree:: :maxdepth: 1 - fabric_en.md - openmpi_en.md - k8s_en.md - k8s_aws_en.md + openmpi_cn.md + +`Fabric `_ is a convenient tool for program deployment and management. We provide a way to deploy and manage with Fabric. If you want to know more about it, please read the following guidelines: + +.. 
toctree:: + :maxdepth: 1 + + fabric_cn.md + +We also support the deployment of PaddlePaddle on AWS. Learn more about: + +.. toctree:: + :maxdepth: 1 + + k8s_aws_cn.md + +The examples can be found under `cluster_train_v2 `_ . \ No newline at end of file From 4cb63d845150b0b5b27505e174fe1023e3c0b229 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 00:53:44 +0800 Subject: [PATCH 35/72] Remove unnecessary header files --- paddle/fluid/operators/reader/blocking_queue.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h index 4a54ccd5e8..5d0c27d30c 100644 --- a/paddle/fluid/operators/reader/blocking_queue.h +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -14,10 +14,8 @@ #pragma once -#include #include // NOLINT #include -#include #include "paddle/fluid/platform/enforce.h" From 5fe1fe3a27614555228eb45fe20dd2ce67f11c70 Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Wed, 25 Apr 2018 15:18:47 -0700 Subject: [PATCH 36/72] Fix signed/unsigned comparison warning (#10211) --- paddle/fluid/operators/softmax_mkldnn_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index d00bd1447e..71b541d98f 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -77,7 +77,7 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { const bool is_test = ctx.Attr("is_test"); if (!is_test) { T threshold = exp(-64); - for (size_t i = 0; i < dst_tz[0] * dst_tz[1]; ++i) { + for (int i = 0; i < dst_tz[0] * dst_tz[1]; ++i) { output_data[i] = output_data[i] < threshold ? threshold : output_data[i]; } From 4c8ff72615f862aa2f661ba9e287a436cbd25b09 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Wed, 25 Apr 2018 16:15:26 -0700 Subject: [PATCH 37/72] Fix CPPLint errors with rxecutor (#10212) --- paddle/fluid/framework/executor.cc | 26 +++++++++++----------- paddle/fluid/framework/executor.h | 8 +++---- paddle/fluid/inference/tests/test_helper.h | 12 +++++----- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 513e720fd0..766bf0ab0c 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -226,15 +226,15 @@ static bool has_fetch_operators( } void Executor::Run(const ProgramDesc& program, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars, const std::string& feed_holder_name, const std::string& fetch_holder_name) { platform::RecordBlock b(kProgramId); bool has_feed_ops = - has_feed_operators(program.Block(0), feed_targets, feed_holder_name); + has_feed_operators(program.Block(0), *feed_targets, feed_holder_name); bool has_fetch_ops = - has_fetch_operators(program.Block(0), fetch_targets, fetch_holder_name); + has_fetch_operators(program.Block(0), *fetch_targets, fetch_holder_name); ProgramDesc* copy_program = const_cast(&program); if (!has_feed_ops || !has_fetch_ops) { @@ -250,7 +250,7 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, feed_holder->SetPersistable(true); int i = 0; - for (auto& feed_target : feed_targets) { + for (auto& feed_target : (*feed_targets)) { std::string var_name = feed_target.first; VLOG(3) << "feed target's name: " << var_name; @@ -273,7 +273,7 @@ void Executor::Run(const ProgramDesc& 
program, Scope* scope, fetch_holder->SetPersistable(true); int i = 0; - for (auto& fetch_target : fetch_targets) { + for (auto& fetch_target : (*fetch_targets)) { std::string var_name = fetch_target.first; VLOG(3) << "fetch target's name: " << var_name; @@ -361,16 +361,16 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, void Executor::RunPreparedContext( ExecutorPrepareContext* ctx, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, bool create_vars, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars, const std::string& feed_holder_name, const std::string& fetch_holder_name) { auto& global_block = ctx->prog_.Block(ctx->block_id_); PADDLE_ENFORCE( - has_feed_operators(global_block, feed_targets, feed_holder_name), + has_feed_operators(global_block, *feed_targets, feed_holder_name), "Program in ExecutorPrepareContext should has feed_ops."); PADDLE_ENFORCE( - has_fetch_operators(global_block, fetch_targets, fetch_holder_name), + has_fetch_operators(global_block, *fetch_targets, fetch_holder_name), "Program in the prepared context should has fetch_ops."); // map the data of feed_targets to feed_holder @@ -378,8 +378,8 @@ void Executor::RunPreparedContext( if (op->Type() == kFeedOpType) { std::string feed_target_name = op->Output("Out")[0]; int idx = boost::get(op->GetAttr("col")); - SetFeedVariable(scope, *feed_targets[feed_target_name], feed_holder_name, - idx); + SetFeedVariable(scope, *(*feed_targets)[feed_target_name], + feed_holder_name, idx); } } @@ -390,7 +390,7 @@ void Executor::RunPreparedContext( if (op->Type() == kFetchOpType) { std::string fetch_target_name = op->Input("X")[0]; int idx = boost::get(op->GetAttr("col")); - *fetch_targets[fetch_target_name] = + *(*fetch_targets)[fetch_target_name] = GetFetchVariable(*scope, fetch_holder_name, idx); } } diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 43defdacf2..4a3d637e2d 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -55,8 +55,8 @@ class Executor { bool create_local_scope = true, bool create_vars = true); void Run(const ProgramDesc& program, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); @@ -74,8 +74,8 @@ class Executor { bool create_vars = true); void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, - std::map& feed_targets, - std::map& fetch_targets, + std::map* feed_targets, + std::map* fetch_targets, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 117472599f..af2a7a5620 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -178,10 +178,10 @@ void TestInference(const std::string& dirname, std::unique_ptr ctx; if (PrepareContext) { ctx = executor.Prepare(*inference_program, 0); - executor.RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, - CreateVars); + executor.RunPreparedContext(ctx.get(), scope, &feed_targets, + &fetch_targets, CreateVars); } else { - executor.Run(*inference_program, scope, feed_targets, fetch_targets, + executor.Run(*inference_program, scope, &feed_targets, &fetch_targets, CreateVars); } @@ -197,10 +197,10 @@ void 
TestInference(const std::string& dirname, if (PrepareContext) { // Note: if you change the inference_program, you need to call // executor.Prepare() again to get a new ExecutorPrepareContext. - executor.RunPreparedContext(ctx.get(), scope, feed_targets, - fetch_targets, CreateVars); + executor.RunPreparedContext(ctx.get(), scope, &feed_targets, + &fetch_targets, CreateVars); } else { - executor.Run(*inference_program, scope, feed_targets, fetch_targets, + executor.Run(*inference_program, scope, &feed_targets, &fetch_targets, CreateVars); } } From b8b2e897cd61a394b99112c02dfcad6e9d747016 Mon Sep 17 00:00:00 2001 From: Lei Wang Date: Wed, 25 Apr 2018 16:53:40 -0700 Subject: [PATCH 38/72] Scripts: refactor paddle bash shell scripts. (#10038) * Scripts: refactor paddle bash shell scripts. * Fix indent. * Modify readme documents. --- paddle/scripts/{docker => }/README.md | 104 ++-- ...paddle-development-environment-gpu.graffle | Bin .../paddle-development-environment-gpu.png | Bin .../paddle-development-environment.graffle | Bin .../doc/paddle-development-environment.png | Bin paddle/scripts/paddle_build.sh | 508 ++++++++++++++++++ paddle/scripts/paddle_docker_build.sh | 89 +++ 7 files changed, 642 insertions(+), 59 deletions(-) rename paddle/scripts/{docker => }/README.md (66%) rename paddle/scripts/{docker => }/doc/paddle-development-environment-gpu.graffle (100%) rename paddle/scripts/{docker => }/doc/paddle-development-environment-gpu.png (100%) rename paddle/scripts/{docker => }/doc/paddle-development-environment.graffle (100%) rename paddle/scripts/{docker => }/doc/paddle-development-environment.png (100%) create mode 100755 paddle/scripts/paddle_build.sh create mode 100755 paddle/scripts/paddle_docker_build.sh diff --git a/paddle/scripts/docker/README.md b/paddle/scripts/README.md similarity index 66% rename from paddle/scripts/docker/README.md rename to paddle/scripts/README.md index 78c0cc3782..9e8b135c1b 100644 --- a/paddle/scripts/docker/README.md +++ b/paddle/scripts/README.md @@ -13,40 +13,49 @@ We want to make the building procedures: 1. Build docker images with PaddlePaddle pre-installed, so that we can run PaddlePaddle applications directly in docker or on Kubernetes clusters. -To achieve this, we created a repo: https://github.com/PaddlePaddle/buildtools -which gives several docker images that are `manylinux1` sufficient. Then we -can build PaddlePaddle using these images to generate corresponding `whl` -binaries. +To achieve this, we maintain a dockerhub repo:https://hub.docker.com/r/paddlepaddle/paddle +which provides pre-built environment images to build PaddlePaddle and generate corresponding `whl` +binaries.(**We strongly recommend building paddlepaddle in our pre-specified Docker environment.**) -## Run The Build +## Development Workflow + +Here we describe how the workflow goes on. We start from considering our daily development environment. + +Developers work on a computer, which is usually a laptop or desktop: + + + +or, they might rely on a more sophisticated box (like with GPUs): + + + +A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion. 
+ +## Build With Docker ### Build Environments -The pre-built build environment images are: +The lastest pre-built build environment images are: | Image | Tag | | ----- | --- | -| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn5 | -| paddlepaddle/paddle_manylinux_devel | cuda8.0_cudnn5 | -| paddlepaddle/paddle_manylinux_devel | cuda7.5_cudnn7 | -| paddlepaddle/paddle_manylinux_devel | cuda9.0_cudnn7 | +| paddlepaddle/paddle | latest-dev | +| paddlepaddle/paddle | latest-dev-android | ### Start Build -Choose one docker image that suit your environment and run the following -command to start a build: - ```bash git clone https://github.com/PaddlePaddle/Paddle.git cd Paddle -docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=OFF" -e "RUN_TEST=OFF" -e "PYTHON_ABI=cp27-cp27mu" paddlepaddle/paddle_manylinux_devel /paddle/paddle/scripts/docker/build.sh +./paddle/scripts/paddle_docker_build.sh build ``` After the build finishes, you can get output `whl` package under `build/python/dist`. -This command mounts the source directory on the host into `/paddle` in the container, then run the build script `/paddle/paddle/scripts/docker/build.sh` -in the container. When it writes to `/paddle/build` in the container, it writes to `$PWD/build` on the host indeed. +This command will download the most recent dev image from docker hub, start a container in the backend and then run the build script `/paddle/paddle/scripts/paddle_build.sh build` in the container. +The container mounts the source directory on the host into `/paddle`. +When it writes to `/paddle/build` in the container, it writes to `$PWD/build` on the host indeed. ### Build Options @@ -68,7 +77,6 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | `WITH_DOC` | OFF | Build docs after build binaries. | | `WOBOQ` | OFF | Generate WOBOQ code viewer under `build/woboq_out` | - ## Docker Images You can get the latest PaddlePaddle docker images by @@ -144,59 +152,37 @@ docker push kubectl ... ``` -## Docker Images for Developers - -We have a special docker image for developers: -`paddlepaddle/paddle:-dev`. This image is also generated from -https://github.com/PaddlePaddle/buildtools - -This a development image contains only the -development tools and standardizes the building procedure. Users include: - -- developers -- no longer need to install development tools on the host, and can build their current work on the host (development computer). -- release engineers -- use this to build the official release from certain branch/tag on Github.com. -- document writers / Website developers -- Our documents are in the source repo in the form of .md/.rst files and comments in source code. We need tools to extract the information, typeset, and generate Web pages. - -Of course, developers can install building tools on their development computers. But different versions of PaddlePaddle might require different set or version of building tools. Also, it makes collaborative debugging easier if all developers use a unified development environment. - -The development image contains the following tools: - - - gcc/clang - - nvcc - - Python - - sphinx - - woboq - - sshd - -Many developers work on a remote computer with GPU; they could ssh into the computer and `docker exec` into the development container. However, running `sshd` in the container allows developers to ssh into the container directly. - - -### Development Workflow - -Here we describe how the workflow goes on. 
We start from considering our daily development environment. +### Reading source code with woboq codebrowser -Developers work on a computer, which is usually a laptop or desktop: +For developers who are interested in the C++ source code, you can build C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser). - +- The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: -or, they might rely on a more sophisticated box (like with GPUs): +```bash +./paddle/scripts/paddle_docker_build.sh html +``` - +- You can open the generated HTML files in your Web browser. Or, if you want to run a Nginx container to serve them for a wider audience, you can run: -A principle here is that source code lies on the development computer (host) so that editors like Eclipse can parse the source code to support auto-completion. +``` +docker run -v $HOME/woboq_out:/usr/share/nginx/html -d -p 8080:80 nginx +``` -### Reading source code with woboq codebrowser +## More Options -For developers who are interested in the C++ source code, please use -e "WOBOQ=ON" to enable the building of C++ source code into HTML pages using [Woboq codebrowser](https://github.com/woboq/woboq_codebrowser). +### Build Without Docker -- The following command builds PaddlePaddle, generates HTML pages from C++ source code, and writes HTML pages into `$HOME/woboq_out` on the host: +Follow the *Dockerfile* in the paddlepaddle repo to set up your local dev environment and run: ```bash -docker run -v $PWD:/paddle -v $HOME/woboq_out:/woboq_out -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TESTING=ON" -e "WOBOQ=ON" paddlepaddle/paddle:latest-dev +./paddle/scripts/paddle_build.sh build ``` -- You can open the generated HTML files in your Web browser. 
Or, if you want to run a Nginx container to serve them for a wider audience, you can run: +### Additional Tasks -``` -docker run -v $HOME/woboq_out:/usr/share/nginx/html -d -p 8080:80 nginx +You can get the help menu for the build scripts by running with no options: + +```bash +./paddle/scripts/paddle_build.sh +or ./paddle/scripts/paddle_docker_build.sh ``` diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle b/paddle/scripts/doc/paddle-development-environment-gpu.graffle similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment-gpu.graffle rename to paddle/scripts/doc/paddle-development-environment-gpu.graffle diff --git a/paddle/scripts/docker/doc/paddle-development-environment-gpu.png b/paddle/scripts/doc/paddle-development-environment-gpu.png similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment-gpu.png rename to paddle/scripts/doc/paddle-development-environment-gpu.png diff --git a/paddle/scripts/docker/doc/paddle-development-environment.graffle b/paddle/scripts/doc/paddle-development-environment.graffle similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment.graffle rename to paddle/scripts/doc/paddle-development-environment.graffle diff --git a/paddle/scripts/docker/doc/paddle-development-environment.png b/paddle/scripts/doc/paddle-development-environment.png similarity index 100% rename from paddle/scripts/docker/doc/paddle-development-environment.png rename to paddle/scripts/doc/paddle-development-environment.png diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh new file mode 100755 index 0000000000..654c8272a1 --- /dev/null +++ b/paddle/scripts/paddle_build.sh @@ -0,0 +1,508 @@ +#!/usr/bin/env bash + +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
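# Usage sketch (the authoritative option list is printed by print_usage
# below); for example, to build for x86 or run the unit tests:
#   ./paddle/scripts/paddle_build.sh build
#   ./paddle/scripts/paddle_build.sh test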
+ + +#================================================= +# Utils +#================================================= + +function print_usage() { + RED='\033[0;31m' + BLUE='\033[0;34m' + BOLD='\033[1m' + NONE='\033[0m' + + echo -e "\n${RED}Usage${NONE}: + ${BOLD}$0${NONE} [OPTION]" + + echo -e "\n${RED}Options${NONE}: + ${BLUE}build${NONE}: run build for x86 platform + ${BLUE}build_android${NONE}: run build for android platform + ${BLUE}build_ios${NONE}: run build for ios platform + ${BLUE}test${NONE}: run all unit tests + ${BLUE}bind_test${NONE}: parallel tests bind to different GPU + ${BLUE}doc${NONE}: generate paddle documents + ${BLUE}html${NONE}: convert C++ source code into HTML + ${BLUE}dockerfile${NONE}: generate paddle release dockerfile + ${BLUE}capi${NONE}: generate paddle CAPI package + ${BLUE}fluid_inference_lib${NONE}: deploy fluid inference library + ${BLUE}check_style${NONE}: run code style check + " +} + +function init() { + PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )" +} + +function cmake_gen() { + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + + # build script will not fail if *.deb does not exist + rm *.deb 2>/dev/null || true + # delete previous built whl packages + rm -rf python/dist 2>/dev/null || true + + # Support build for all python versions, currently + # including cp27-cp27m and cp27-cp27mu. + PYTHON_FLAGS="" + if [ "$1" != "" ]; then + echo "using python abi: $1" + if [ "$1" == "cp27-cp27m" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} + export PATH=/opt/python/cp27-cp27m/bin/:${PATH} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + elif [ "$1" == "cp27-cp27mu" ]; then + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} + export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} + PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + fi + fi + + cat <&2 + echo "Please use pre-commit to check what is wrong." 1>&2 + exit 1 +} + +function check_style() { + trap 'abort' 0 + set -e + + # install glide + curl https://glide.sh/get | bash + eval "$(GIMME_GO_VERSION=1.8.3 gimme)" + + # set up go environment for running gometalinter + mkdir -p $GOPATH/src/github.com/PaddlePaddle/ + ln -sf ${PADDLE_ROOT} $GOPATH/src/github.com/PaddlePaddle/Paddle + cd $GOPATH/src/github.com/PaddlePaddle/Paddle/go; glide install; cd - + + go get github.com/alecthomas/gometalinter + gometalinter --install + + cd ${PADDLE_ROOT} + export PATH=/usr/bin:$PATH + pre-commit install + clang-format --version + + if ! pre-commit run -a ; then + git diff + exit 1 + fi + + trap : 0 +} + +#================================================= +# Build +#================================================= + +function build() { + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + cat <= 21." 
+ ANDROID_API=21 + fi + else # armeabi, armeabi-v7a + ANDROID_ARCH=arm + fi + + ANDROID_STANDALONE_TOOLCHAIN=$ANDROID_TOOLCHAINS_DIR/$ANDROID_ARCH-android-$ANDROID_API + + cat < ${PADDLE_ROOT}/build/Dockerfile < + ENV HOME /root +EOF + + if [[ ${WITH_GPU} == "ON" ]]; then + NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&" + else + NCCL_DEPS="" + fi + + if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]]; then + PADDLE_VERSION="paddle version" + CMD='"paddle", "version"' + else + PADDLE_VERSION="true" + CMD='"true"' + fi + + cat >> /paddle/build/Dockerfile < /dev/null + return $? +} + +function start_build_docker() { + docker pull $IMG + + if container_running "${CONTAINER_ID}"; then + docker stop "${CONTAINER_ID}" 1>/dev/null + docker rm -f "${CONTAINER_ID}" 1>/dev/null + fi + + DOCKER_ENV=$(cat < Date: Wed, 25 Apr 2018 17:34:36 -0700 Subject: [PATCH 39/72] Scripts: fix syntax error. --- paddle/scripts/paddle_docker_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/scripts/paddle_docker_build.sh b/paddle/scripts/paddle_docker_build.sh index 53df94980f..252227ef88 100755 --- a/paddle/scripts/paddle_docker_build.sh +++ b/paddle/scripts/paddle_docker_build.sh @@ -75,6 +75,7 @@ function main() { build_android) start_build_docker docker exec ${CONTAINER_ID} bash -c "./paddle/scripts/paddle_build.sh $@" + ;; *) if container_running "${CONTAINER_ID}"; then docker exec ${CONTAINER_ID} bash -c "./paddle/scripts/paddle_build.sh $@" From 31ad00395ad6b3c0fcc38cb9700fc0cf13e88b60 Mon Sep 17 00:00:00 2001 From: weixing02 Date: Thu, 26 Apr 2018 10:09:57 +0800 Subject: [PATCH 40/72] Change paddle.v2.dataset to paddle.dataset for V2 docs --- doc/v2/api/data/dataset.rst | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/v2/api/data/dataset.rst b/doc/v2/api/data/dataset.rst index 02e41564b1..e7c8be4452 100644 --- a/doc/v2/api/data/dataset.rst +++ b/doc/v2/api/data/dataset.rst @@ -1,82 +1,82 @@ Dataset ======= -.. automodule:: paddle.v2.dataset +.. automodule:: paddle.dataset :members: :noindex: mnist +++++ -.. automodule:: paddle.v2.dataset.mnist +.. automodule:: paddle.dataset.mnist :members: :noindex: cifar +++++ -.. automodule:: paddle.v2.dataset.cifar +.. automodule:: paddle.dataset.cifar :members: :noindex: conll05 +++++++ -.. automodule:: paddle.v2.dataset.conll05 +.. automodule:: paddle.dataset.conll05 :members: get_dict,get_embedding,test :noindex: imdb ++++ -.. automodule:: paddle.v2.dataset.imdb +.. automodule:: paddle.dataset.imdb :members: :noindex: imikolov ++++++++ -.. automodule:: paddle.v2.dataset.imikolov +.. automodule:: paddle.dataset.imikolov :members: :noindex: movielens +++++++++ -.. automodule:: paddle.v2.dataset.movielens +.. automodule:: paddle.dataset.movielens :members: :noindex: -.. autoclass:: paddle.v2.dataset.movielens.MovieInfo +.. autoclass:: paddle.dataset.movielens.MovieInfo :noindex: - -.. autoclass:: paddle.v2.dataset.movielens.UserInfo + +.. autoclass:: paddle.dataset.movielens.UserInfo :noindex: sentiment +++++++++ -.. automodule:: paddle.v2.dataset.sentiment +.. automodule:: paddle.dataset.sentiment :members: :noindex: uci_housing +++++++++++ -.. automodule:: paddle.v2.dataset.uci_housing +.. automodule:: paddle.dataset.uci_housing :members: :noindex: wmt14 +++++ -.. automodule:: paddle.v2.dataset.wmt14 +.. automodule:: paddle.dataset.wmt14 :members: :noindex: wmt16 +++++ -.. automodule:: paddle.v2.dataset.wmt16 +.. 
automodule:: paddle.dataset.wmt16 :members: :noindex: From 304b6b71389e5bc035e6c04c6fdb76d2717f3588 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 11:11:25 +0800 Subject: [PATCH 41/72] Follow comments --- .../fluid/operators/reader/blocking_queue.h | 37 +++++++++---------- .../reader/create_double_buffer_reader_op.cc | 17 ++------- .../fluid/operators/reader/open_files_op.cc | 12 +----- 3 files changed, 23 insertions(+), 43 deletions(-) diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h index 5d0c27d30c..71684b1417 100644 --- a/paddle/fluid/operators/reader/blocking_queue.h +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -25,6 +25,11 @@ namespace reader { template class BlockingQueue { + // BlockingQueue is for buffered reading and is supposed to use only the + // reader package. It is true that we could and we should have been using + // framework::Channel, but which has currently a deadlock bug. BlockingQueue + // is a workaround and a simplified version of framework::Channel as it + // doesn't support GPU and it implements on buffered blocking queue. public: explicit BlockingQueue(size_t capacity) : capacity_(capacity), closed_(false) { @@ -37,26 +42,28 @@ class BlockingQueue { std::unique_lock lock(mutex_); send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlokcingQueue."; return false; - } else { - PADDLE_ENFORCE_LT(queue_.size(), capacity_); - queue_.push_back(elem); - receive_cv_.notify_one(); - return true; } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.push_back(elem); + receive_cv_.notify_one(); + return true; } bool Send(T&& elem) { std::unique_lock lock(mutex_); send_cv_.wait(lock, [&] { return queue_.size() < capacity_ || closed_; }); if (closed_) { + VLOG(5) + << "WARNING: Sending an element to a closed reader::BlokcingQueue."; return false; - } else { - PADDLE_ENFORCE_LT(queue_.size(), capacity_); - queue_.emplace_back(std::move(elem)); - receive_cv_.notify_one(); - return true; } + PADDLE_ENFORCE_LT(queue_.size(), capacity_); + queue_.emplace_back(std::move(elem)); + receive_cv_.notify_one(); + return true; } bool Receive(T* elem) { @@ -86,16 +93,6 @@ class BlockingQueue { return closed_; } - bool CanSend() { - std::lock_guard lock(mutex_); - return !closed_ && queue_.size() < capacity_; - } - - bool CanReceive() { - std::lock_guard lock(mutex_); - return !queue_.empty(); - } - size_t Cap() { std::lock_guard lock(mutex_); return capacity_; diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 504e069b65..3fdc31dfa5 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -55,8 +55,6 @@ class DoubleBufferReader : public framework::DecoratedReader { ~DoubleBufferReader() { EndPrefetcher(); } private: - bool HasNext() const; - void StartPrefetcher() { channel_ = new reader::BlockingQueue(kChannelSize); prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); @@ -139,17 +137,16 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { }; void DoubleBufferReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - size_t cached_tensor_id; - channel_->Receive(&cached_tensor_id); + size_t cached_tensor_id; + if (channel_->Receive(&cached_tensor_id)) { if 
(platform::is_gpu_place(place_)) { *out = gpu_tensor_cache_[cached_tensor_id]; - ctxs_[cached_tensor_id]->Wait(); } else { // CPU place *out = cpu_tensor_cache_[cached_tensor_id]; } + } else { + out->clear(); } } @@ -159,12 +156,6 @@ void DoubleBufferReader::ReInit() { StartPrefetcher(); } -bool DoubleBufferReader::HasNext() const { - while (!channel_->IsClosed() && !channel_->CanReceive()) { - } - return channel_->CanReceive(); -} - void DoubleBufferReader::PrefetchThreadFunc() { VLOG(5) << "A new prefetch thread starts."; size_t cached_tensor_id = 0; diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 461b56c7a4..91ad7d5658 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -37,7 +37,6 @@ class MultiFileReader : public framework::ReaderBase { ~MultiFileReader() { EndScheduler(); } private: - bool HasNext(); void StartNewScheduler(); void EndScheduler(); void ScheduleThreadFunc(); @@ -54,9 +53,8 @@ class MultiFileReader : public framework::ReaderBase { }; void MultiFileReader::ReadNext(std::vector* out) { - out->clear(); - if (HasNext()) { - buffer_->Receive(out); + if (!buffer_->Receive(out)) { + out->clear(); } } @@ -65,12 +63,6 @@ void MultiFileReader::ReInit() { StartNewScheduler(); } -bool MultiFileReader::HasNext() { - while (!buffer_->IsClosed() && !buffer_->CanReceive()) { - } - return buffer_->CanReceive(); -} - void MultiFileReader::StartNewScheduler() { size_t thread_num = prefetchers_.size(); waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); From 17c51d69d12c09cf0af3baec2a8ebe3c9ec1de93 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 11:26:25 +0800 Subject: [PATCH 42/72] fix unit test error --- .../operators/reader/reader_blocking_queue_test.cc | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc index 07c22fe2bf..c0229f53f2 100644 --- a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -28,13 +28,6 @@ TEST(BlockingQueue, CapacityTest) { EXPECT_EQ(q.Cap(), cap); } -TEST(BlockingQueue, CanSendTest) { - size_t cap = 1; - BlockingQueue q(cap); - q.Send(1); - EXPECT_FALSE(q.CanSend()); -} - void FirstInFirstOut(size_t queue_cap, size_t elem_num, size_t send_time_gap, size_t receive_time_gap) { BlockingQueue q(queue_cap); @@ -83,9 +76,11 @@ TEST(BlockingQueue, SenderBlockingTest) { sender.join(); EXPECT_EQ(send_count, queue_cap); std::vector res; - while (q.CanReceive()) { + while (true) { size_t elem; - EXPECT_TRUE(q.Receive(&elem)); + if (!q.Receive(&elem) { + break; + } res.push_back(elem); } EXPECT_EQ(res.size(), queue_cap); From 8bd34664f12c61178a82dd1be535f4ae798a4540 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 11:27:17 +0800 Subject: [PATCH 43/72] fix unit test error --- paddle/fluid/operators/reader/reader_blocking_queue_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc index c0229f53f2..7d1b381d56 100644 --- a/paddle/fluid/operators/reader/reader_blocking_queue_test.cc +++ b/paddle/fluid/operators/reader/reader_blocking_queue_test.cc @@ -78,7 +78,7 @@ TEST(BlockingQueue, SenderBlockingTest) { std::vector res; while (true) { 
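      // Drain whatever the sender managed to enqueue. With the reworked
      // BlockingQueue, Receive() blocks until an element is available and
      // is expected to return false once the queue has been closed and
      // emptied, so this loop terminates without the removed CanReceive()
      // busy-wait check.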
size_t elem; - if (!q.Receive(&elem) { + if (!q.Receive(&elem)) { break; } res.push_back(elem); From 63bd38bd74bc623eff75bd909d57981c538444c7 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 26 Apr 2018 12:25:17 +0800 Subject: [PATCH 44/72] code optimize --- .../operators/detail/variable_response.h | 4 +++- paddle/fluid/operators/listen_and_serv_op.cc | 21 ++++++++++--------- paddle/fluid/operators/send_recv_op_test.cc | 2 +- python/paddle/fluid/distribute_transpiler.py | 5 ++++- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/detail/variable_response.h index 8c05e60cec..bf624da2a6 100644 --- a/paddle/fluid/operators/detail/variable_response.h +++ b/paddle/fluid/operators/detail/variable_response.h @@ -63,7 +63,9 @@ class VariableResponse { // other: number of error field. int Parse(const ::grpc::ByteBuffer& byte_buffer); - framework::Scope& GetLocalScope() const { return *local_scope_; } + const framework::Scope& GetLocalScope() const { return *local_scope_; } + + framework::Scope* GetMutableLocalScope() const { return local_scope_; } inline std::string Varname() { return meta_.varname(); } inline std::string OutVarname() { return meta_.out_varname(); } diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 3ee642dedb..ba2ea0d13e 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -207,18 +207,19 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, framework::BlockDesc *prefetch_block) const { VLOG(3) << "RunAsyncLoop in"; // grad name to block id - std::unordered_map grad_to_id; + std::unordered_map grad_to_block_id; std::unordered_map id_to_grad; - auto grad_to_id_str = Attr>("grad_to_id"); - for (auto &grad_and_id : grad_to_id_str) { + auto grad_to_block_id_str = + Attr>("grad_to_block_id"); + for (auto &grad_and_id : grad_to_block_id_str) { std::vector pieces; split(grad_and_id, ':', &pieces); VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; PADDLE_ENFORCE_EQ(pieces.size(), 2); - PADDLE_ENFORCE_EQ(grad_to_id.count(pieces[0]), 0); + PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); int block_id = std::stoi(pieces[1]); - grad_to_id[pieces[0]] = block_id; + grad_to_block_id[pieces[0]] = block_id; id_to_grad[block_id] = pieces[0]; } size_t num_blocks = program->Size(); @@ -232,9 +233,9 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, auto optimize_prepared = executor->Prepare(*program, block_list); std::unordered_map> - grad_to_prepared; + grad_to_prepared_block; for (size_t i = 0; i < block_list.size(); ++i) { - grad_to_prepared[id_to_grad[block_list[i]]] = optimize_prepared[i]; + grad_to_prepared_block[id_to_grad[block_list[i]]] = optimize_prepared[i]; } VLOG(3) << "RunAsyncLoop into while"; @@ -253,8 +254,8 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, LOG(ERROR) << "Can not find server side var: " << recv_var_name; PADDLE_THROW("Can not find server side var"); } - AsyncExecuteBlock(executor, grad_to_prepared[recv_var_name].get(), - &(v.second->GetLocalScope())); + AsyncExecuteBlock(executor, grad_to_prepared_block[recv_var_name].get(), + v.second->GetMutableLocalScope()); // TODO(qiao): explain why if (var->IsType()) { var->GetMutable()->mutable_rows()->clear(); @@ -328,7 +329,7 @@ from send_op and send back variables to recv_op. 
.SetDefault("127.0.0.1:6164") .AddCustomChecker([](const std::string &ip) { return !ip.empty(); }); AddAttr>( - "grad_to_id", + "grad_to_block_id", "['param1@GRAD.block0:1', 'param2@GRAD.blockn:2'] " "a map from grad name to it's optimize block id") .SetDefault({}); diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 72dc1586c6..d2e1f3cb2f 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -137,7 +137,7 @@ void StartServerNet(bool is_sparse) { attrs.insert({"GradList", std::vector({"x1"})}); attrs.insert({"OptimizeBlock", optimize_block}); attrs.insert({"PrefetchBlock", prefetch_block}); - attrs.insert({"grad_to_id", std::vector({""})}); + attrs.insert({"grad_to_block_id", std::vector({""})}); attrs.insert({"sync_mode", true}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index 3a3a94640a..d17475cd28 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -185,6 +185,9 @@ class DistributeTranspiler: :param split_method: A function to determin how to split variables to different servers equally. :type split_method: function + :type sync_mode: boolean default True + :param sync_mode: if sync_mode is set True, it means that dist transpiler + will transpile the program into sync_mode pserver and trainer program. """ assert (callable(split_method)) if program is None: @@ -479,7 +482,7 @@ class DistributeTranspiler: "Fanin": self.trainer_num, "PrefetchBlock": prefetch_block, "sync_mode": self.sync_mode, - "grad_to_id": grad_to_block_id + "grad_to_block_id": grad_to_block_id }) pserver_program.sync_with_cpp() From 848fb002153fbd66e88c6d63f8074ebb7be8e3b3 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Thu, 26 Apr 2018 12:45:42 +0800 Subject: [PATCH 45/72] Fix comments. --- paddle/fluid/inference/io.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 5b8dec199d..65db7c7b50 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -22,8 +22,8 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/pybind/pybind.h" -DEFINE_string(devices, "", "The devices to be used."); -DEFINE_bool(init_p2p, true, "Whether to init p2p."); +DEFINE_string(devices, "", "The devices to be used which is joined by comma."); +DEFINE_bool(init_p2p, false, "Whether to init p2p."); namespace paddle { namespace inference { From 20858f98d5894ab65fa0dcc4f8d31f5c8a66fbe9 Mon Sep 17 00:00:00 2001 From: kolinwei <331911734@qq.com> Date: Thu, 26 Apr 2018 12:52:01 +0800 Subject: [PATCH 46/72] update benchmark/fluid fix some import error (#10133) * Update stacked_dynamic_lstm.py * Update machine_translation.py * Update mnist.py * Update resnet.py * Update vgg.py --- benchmark/fluid/machine_translation.py | 2 +- benchmark/fluid/mnist.py | 2 +- benchmark/fluid/resnet.py | 2 +- benchmark/fluid/stacked_dynamic_lstm.py | 6 +++--- benchmark/fluid/vgg.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/machine_translation.py index d7a421c109..adde5f21ac 100644 --- a/benchmark/fluid/machine_translation.py +++ b/benchmark/fluid/machine_translation.py @@ -21,7 +21,7 @@ import argparse import time import distutils.util -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py index dc10ac2ec1..1e2185dfac 100644 --- a/benchmark/fluid/mnist.py +++ b/benchmark/fluid/mnist.py @@ -20,7 +20,7 @@ import numpy as np import argparse import time -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py index 1af5eaf6b4..831fa2c019 100644 --- a/benchmark/fluid/resnet.py +++ b/benchmark/fluid/resnet.py @@ -23,7 +23,7 @@ import time import cProfile, pstats, StringIO -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py index 5fcbdd64af..73bcc47b4d 100644 --- a/benchmark/fluid/stacked_dynamic_lstm.py +++ b/benchmark/fluid/stacked_dynamic_lstm.py @@ -23,10 +23,10 @@ import random import time import numpy -import paddle.v2 as paddle -import paddle.v2.dataset.imdb as imdb +import paddle +import paddle.dataset.imdb as imdb import paddle.fluid as fluid -from paddle.v2 import batch +import paddle.batch as batch import paddle.fluid.profiler as profiler diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py index 9d990eff62..53e34e0cbd 100644 --- a/benchmark/fluid/vgg.py +++ b/benchmark/fluid/vgg.py @@ -17,7 +17,7 @@ from __future__ import print_function import sys import time import numpy as np -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import argparse From 46342a2306d1ea8daac6aaddad895c4be1746cc8 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 26 Apr 2018 13:51:26 +0800 Subject: [PATCH 47/72] delete useless code --- paddle/fluid/operators/listen_and_serv_op.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index ba2ea0d13e..4c0da18ba8 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -256,10 +256,6 @@ void 
ListenAndServOp::RunAsyncLoop(framework::Executor *executor, } AsyncExecuteBlock(executor, grad_to_prepared_block[recv_var_name].get(), v.second->GetMutableLocalScope()); - // TODO(qiao): explain why - if (var->IsType()) { - var->GetMutable()->mutable_rows()->clear(); - } } if (exit_flag) { From 3295f31076bd0acc97b3bfc599bf88ccb8e20f9c Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 26 Apr 2018 14:43:34 +0800 Subject: [PATCH 48/72] optimize naming --- paddle/fluid/operators/listen_and_serv_op.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 4c0da18ba8..57cff680ab 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -233,9 +233,9 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, auto optimize_prepared = executor->Prepare(*program, block_list); std::unordered_map> - grad_to_prepared_block; + grad_to_prepared_ctx; for (size_t i = 0; i < block_list.size(); ++i) { - grad_to_prepared_block[id_to_grad[block_list[i]]] = optimize_prepared[i]; + grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; } VLOG(3) << "RunAsyncLoop into while"; @@ -254,7 +254,7 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, LOG(ERROR) << "Can not find server side var: " << recv_var_name; PADDLE_THROW("Can not find server side var"); } - AsyncExecuteBlock(executor, grad_to_prepared_block[recv_var_name].get(), + AsyncExecuteBlock(executor, grad_to_prepared_ctx[recv_var_name].get(), v.second->GetMutableLocalScope()); } From f457d5da06e09c4b00dcd2a6e70f3fff6f03eb74 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 26 Apr 2018 00:09:04 -0700 Subject: [PATCH 49/72] Fix more CPPLint errors (#10218) * Fix more CPPLint issues * Fix more CPPLint issues * Fix more CPPLint issues * Fix CPPLint issues in operators/math and operators/reader --- paddle/fluid/inference/tensorrt/engine.cc | 1 + paddle/fluid/operators/math/depthwise_conv.cu | 1 + paddle/fluid/operators/math/depthwise_conv.h | 1 + .../math/detail/activation_functions.h | 1 + paddle/fluid/operators/math/im2col_test.cc | 1 + paddle/fluid/operators/math/matmul.h | 2 + paddle/fluid/operators/math/sampler.cc | 2 +- .../math/selected_rows_functor_test.cc | 99 ++++++++++++------- .../fluid/operators/math/sequence_pooling.cc | 1 + paddle/fluid/operators/math/vol2col.cc | 1 + paddle/fluid/operators/math/vol2col.cu | 2 + paddle/fluid/operators/math/vol2col_test.cc | 1 + .../operators/reader/reader_op_registry.cc | 4 +- 13 files changed, 79 insertions(+), 38 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc index 276502e499..03a25f8e8b 100644 --- a/paddle/fluid/inference/tensorrt/engine.cc +++ b/paddle/fluid/inference/tensorrt/engine.cc @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include +#include #include "paddle/fluid/inference/tensorrt/helper.h" #include "paddle/fluid/platform/enforce.h" diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu index a5e6e4031b..d360728484 100644 --- a/paddle/fluid/operators/math/depthwise_conv.cu +++ b/paddle/fluid/operators/math/depthwise_conv.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include #include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h index 081bda891d..97aec40188 100644 --- a/paddle/fluid/operators/math/depthwise_conv.h +++ b/paddle/fluid/operators/math/depthwise_conv.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index d205ebf210..b127fbe8c8 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index b3978536bc..b497c2a661 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" #include +#include template void testIm2col() { diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h index 6e2d35cd0f..0006c5062f 100644 --- a/paddle/fluid/operators/math/matmul.h +++ b/paddle/fluid/operators/math/matmul.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc index 3ec6538d7f..3066dc0ba2 100644 --- a/paddle/fluid/operators/math/sampler.cc +++ b/paddle/fluid/operators/math/sampler.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "sampler.h" +#include "paddle/fluid/operators/math/sampler.h" namespace paddle { namespace random { diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc index 679b6568ad..70bed820ee 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cc +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cc @@ -13,41 +13,50 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/math/selected_rows_functor.h" +#include #include "gtest/gtest.h" #include "paddle/fluid/operators/math/math_function.h" TEST(selected_rows_functor, cpu_add) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAdd add_functor; + paddle::operators::math::SelectedRowsAdd + add_functor; add_functor(ctx, *selected_rows1, *selected_rows2, output.get()); auto out_height = output->height(); @@ -78,14 +87,20 @@ TEST(selected_rows_functor, cpu_add) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - std::unique_ptr tensor2{new Tensor()}; - tensor2->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor2{ + new paddle::framework::Tensor()}; + tensor2->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); - SelectedRowsAddTensor add_tensor_functor; + paddle::operators::math::SelectedRowsAddTensor< + paddle::platform::CPUDeviceContext, float> + add_tensor_functor; add_tensor_functor(ctx, *output, *tensor1, tensor2.get()); auto* tensor2_data = tensor2->data(); @@ -106,38 +121,46 @@ TEST(selected_rows_functor, cpu_add) { } TEST(selected_rows_functor, cpu_add_to) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::operators::math; - - CPUPlace cpu_place; - CPUDeviceContext ctx(cpu_place); - SetConstant functor; + paddle::platform::CPUPlace cpu_place; + paddle::platform::CPUDeviceContext ctx(cpu_place); + paddle::operators::math::SetConstant + functor; int64_t height = 10; int64_t row_numel = 10; std::vector rows1{0, 4, 7}; - std::unique_ptr selected_rows1{new SelectedRows(rows1, height)}; + 
std::unique_ptr selected_rows1{ + new paddle::framework::SelectedRows(rows1, height)}; auto* in1_value = selected_rows1->mutable_value(); in1_value->mutable_data( - make_ddim({static_cast(rows1.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows1.size()), row_numel}), + cpu_place); functor(ctx, in1_value, 1.0); std::vector rows2{0, 5, 7, 9}; - std::unique_ptr selected_rows2{new SelectedRows(rows2, height)}; + std::unique_ptr selected_rows2{ + new paddle::framework::SelectedRows(rows2, height)}; auto* in2_value = selected_rows2->mutable_value(); in2_value->mutable_data( - make_ddim({static_cast(rows2.size()), row_numel}), cpu_place); + paddle::framework::make_ddim( + {static_cast(rows2.size()), row_numel}), + cpu_place); functor(ctx, in2_value, 2.0); - std::unique_ptr output{new SelectedRows()}; + std::unique_ptr output{ + new paddle::framework::SelectedRows()}; output->set_height(height); auto* out_value = output->mutable_value(); // simplely concat two SelectedRows - out_value->mutable_data(make_ddim({7, 10}), cpu_place); + out_value->mutable_data(paddle::framework::make_ddim({7, 10}), + cpu_place); - SelectedRowsAddTo add_to_functor; + paddle::operators::math::SelectedRowsAddTo + add_to_functor; add_to_functor(ctx, *selected_rows1, 0, output.get()); add_to_functor(ctx, *selected_rows2, in1_value->numel(), output.get()); @@ -169,11 +192,15 @@ TEST(selected_rows_functor, cpu_add_to) { EXPECT_EQ(out_data[5 * row_numel + 7], 2.0); EXPECT_EQ(out_data[6 * row_numel + 9], 2.0); - std::unique_ptr tensor1{new Tensor()}; - tensor1->mutable_data(make_ddim({height, row_numel}), cpu_place); + std::unique_ptr tensor1{ + new paddle::framework::Tensor()}; + tensor1->mutable_data( + paddle::framework::make_ddim({height, row_numel}), cpu_place); functor(ctx, tensor1.get(), 3.0); - SelectedRowsAddToTensor add_to_tensor_functor; + paddle::operators::math::SelectedRowsAddToTensor< + paddle::platform::CPUDeviceContext, float> + add_to_tensor_functor; add_to_tensor_functor(ctx, *output, tensor1.get()); auto* tensor1_data = tensor1->data(); diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index 5ae42ab973..f25d3d3f1e 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/sequence_pooling.h" +#include #include "paddle/fluid/operators/math/math_function.h" namespace paddle { diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc index 09e9f85cca..e92adc09ba 100644 --- a/paddle/fluid/operators/math/vol2col.cc +++ b/paddle/fluid/operators/math/vol2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/vol2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index 619730d394..e0f3ef3687 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include +#include #include "paddle/fluid/operators/math/vol2col.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index eb91f862e3..b4e4186c98 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/vol2col.h" #include #include +#include template void testVol2col() { diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index fc8dc747ff..3ff4536819 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "reader_op_registry.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" +#include +#include namespace paddle { namespace operators { From bcf260e1e8b94de533775e65dd1d55393bbda16b Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 09:29:20 +0000 Subject: [PATCH 50/72] fix several unit tests --- paddle/fluid/operators/fetch_op.cc | 3 +- paddle/fluid/operators/math/concat_test.cc | 24 +++---- paddle/fluid/operators/math/im2col_test.cc | 16 ++--- .../operators/math/math_function_test.cu | 62 +++++++++---------- paddle/fluid/operators/math/vol2col_test.cc | 9 +-- paddle/fluid/operators/nccl_op_test.cu.cc | 6 +- 6 files changed, 60 insertions(+), 60 deletions(-) diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 7c7f3e9059..462d518120 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -59,8 +59,7 @@ class FetchOp : public framework::OperatorBase { // CPU outputs? 
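    // The extra boolean passed to TensorCopy below appears to request a
    // synchronous copy, which is presumably why the explicit dev_ctx.Wait()
    // that used to follow the copy is dropped by this patch.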
auto &dev_ctx = *pool.Get(src_item.place()); - TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item); - dev_ctx.Wait(); + TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item, true); dst_item.set_lod(src_item.lod()); VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index 1741af8148..854a8ee442 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -72,8 +72,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopy(input_a_cpu, Place(), *context, &input_a, true); + TensorCopy(input_b_cpu, Place(), *context, &input_b, true); } std::vector input; @@ -89,7 +89,7 @@ void testConcat() { int* out_ptr; if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopy(out, CPUPlace(), *context, &out_cpu, true); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -144,8 +144,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopy(input_a_cpu, Place(), *context, &input_a, true); + TensorCopy(input_b_cpu, Place(), *context, &input_b, true); } input.clear(); @@ -159,7 +159,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopy(out, CPUPlace(), *context, &out_cpu, true); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -216,8 +216,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopy(input_a_cpu, Place(), *context, &input_a, true); + TensorCopy(input_b_cpu, Place(), *context, &input_b, true); } input.clear(); @@ -231,7 +231,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopy(out, CPUPlace(), *context, &out_cpu, true); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -290,8 +290,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a); - TensorCopy(input_b_cpu, Place(), *context, &input_b); + TensorCopy(input_a_cpu, Place(), *context, &input_a, true); + TensorCopy(input_b_cpu, Place(), *context, &input_b, true); } input.clear(); @@ -305,7 +305,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu); + TensorCopy(out, CPUPlace(), *context, &out_cpu, true); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index b3978536bc..b09f95e0b2 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -62,7 +62,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input, true); } output_cfo.mutable_data( {1, filter_size, 
filter_size, output_height, output_width}, *place); @@ -87,7 +87,8 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp, + true); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -98,7 +99,8 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp, + true); out_ocf_ptr = output_tmp.data(); } @@ -119,7 +121,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input, true); } col2im(*context, output_cfo, dilation, stride, padding, &input); @@ -128,7 +130,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -140,7 +142,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input, true); } col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); @@ -148,7 +150,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index 8982d9d066..ccfe0c6c07 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -40,15 +40,15 @@ TEST(math_function, notrans_mul_trans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input1, gpu_place, context, &input2_gpu, true); out_gpu.mutable_data({2, 2}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopy(out_gpu, cpu_place, context, &out, true); float* out_ptr = out.data(); context.Wait(); @@ -80,8 +80,8 @@ TEST(math_function, notrans_mul_trans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input1, gpu_place, context, &input2_gpu, true); out_gpu.mutable_data({2, 2}, gpu_place); @@ -89,7 +89,7 @@ TEST(math_function, notrans_mul_trans_fp16) { context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, 
&out); + TensorCopy(out_gpu, cpu_place, context, &out, true); float16* out_ptr = out.data(); context.Wait(); @@ -117,15 +117,15 @@ TEST(math_function, trans_mul_notrans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input1, gpu_place, context, &input2_gpu, true); out_gpu.mutable_data({3, 3}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopy(out_gpu, cpu_place, context, &out, true); float* out_ptr = out.data(); context.Wait(); @@ -162,8 +162,8 @@ TEST(math_function, trans_mul_notrans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input1, gpu_place, context, &input2_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input1, gpu_place, context, &input2_gpu, true); out_gpu.mutable_data({3, 3}, gpu_place); @@ -171,7 +171,7 @@ TEST(math_function, trans_mul_notrans_fp16) { context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out); + TensorCopy(out_gpu, cpu_place, context, &out, true); float16* out_ptr = out.data(); context.Wait(); @@ -214,9 +214,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input2, gpu_place, context, &input2_gpu, true); + TensorCopy(input3, gpu_place, context, &input3_gpu, true); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -224,7 +224,7 @@ TEST(math_function, gemm_notrans_cublas_fp32) { paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopy(input3_gpu, cpu_place, context, &input3, true); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -274,9 +274,9 @@ TEST(math_function, gemm_notrans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input2, gpu_place, context, &input2_gpu, true); + TensorCopy(input3, gpu_place, context, &input3_gpu, true); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -285,7 +285,7 @@ TEST(math_function, gemm_notrans_cublas_fp16) { context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopy(input3_gpu, cpu_place, context, &input3, true); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -332,9 +332,9 @@ TEST(math_function, gemm_trans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 
6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input2, gpu_place, context, &input2_gpu, true); + TensorCopy(input3, gpu_place, context, &input3_gpu, true); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -342,7 +342,7 @@ TEST(math_function, gemm_trans_cublas_fp32) { paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopy(input3_gpu, cpu_place, context, &input3, true); context.Wait(); EXPECT_EQ(input3_ptr[0], 0); @@ -386,9 +386,9 @@ TEST(math_function, gemm_trans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu); - TensorCopy(input2, gpu_place, context, &input2_gpu); - TensorCopy(input3, gpu_place, context, &input3_gpu); + TensorCopy(input1, gpu_place, context, &input1_gpu, true); + TensorCopy(input2, gpu_place, context, &input2_gpu, true); + TensorCopy(input3, gpu_place, context, &input3_gpu, true); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -397,7 +397,7 @@ TEST(math_function, gemm_trans_cublas_fp16) { context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3); + TensorCopy(input3_gpu, cpu_place, context, &input3, true); context.Wait(); EXPECT_EQ(static_cast(input3_ptr[0]), 0); @@ -441,14 +441,14 @@ void GemvTest(int m, int n, bool trans) { data_b[i] = static_cast(i); } - TensorCopy(mat_a, gpu_place, context, &g_mat_a); - TensorCopy(vec_b, gpu_place, context, &g_vec_b); + TensorCopy(mat_a, gpu_place, context, &g_mat_a, true); + TensorCopy(vec_b, gpu_place, context, &g_vec_b, true); paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); - TensorCopy(g_vec_c, cpu_place, context, &vec_c); + TensorCopy(g_vec_c, cpu_place, context, &vec_c, true); if (!trans) { for (int i = 0; i < m; ++i) { diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index eb91f862e3..9de47dcc25 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -71,7 +71,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - paddle::framework::TensorCopy(input_tmp, *place, *context, &input); + paddle::framework::TensorCopy(input_tmp, *place, *context, &input, true); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -85,7 +85,8 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp); + TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp, + true); out_cfo_ptr = output_tmp.data(); } @@ -99,7 +100,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input); + TensorCopy(input_tmp, *place, *context, &input, true); } 
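// The added `true` argument makes TensorCopy block until the copy has
// finished, so the host side can read the destination data immediately
// instead of issuing an explicit Wait() on the device context first.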
paddle::operators::math::Col2VolFunctor col2vol; @@ -109,7 +110,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp); + TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); in_ptr = input_tmp.data(); } diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 20b8a5c98a..ef54d79fdf 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -228,10 +228,8 @@ TEST_F(NCCLTester, ncclReduceOp) { result_tensor->Resize(kDims); auto *ct = result_tensor->mutable_data(cpu_place); - paddle::memory::Copy( - cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, - recv_tensor.numel() * sizeof(float), - static_cast(dev_ctxs_[kRoot])->stream()); + paddle::memory::Copy(cpu_place, ct, p::CUDAPlace(gpu_list_[kRoot]), rt, + recv_tensor.numel() * sizeof(float), nullptr); for (int64_t j = 0; j < f::product(kDims); ++j) { ASSERT_NEAR(ct[j], expected_result, 1e-5); From b88721213f4cd7f1d64ab6ce9b876aa39b081077 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 06:40:19 +0000 Subject: [PATCH 51/72] fix broadcast_op_test and reduce_op_test --- CMakeLists.txt | 2 +- paddle/fluid/framework/details/broadcast_op_handle_test.cc | 4 ++-- paddle/fluid/framework/details/reduce_op_handle_test.cc | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 23bbe829ac..2ec4eda711 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,7 @@ option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) option(WITH_MKL "Compile PaddlePaddle with MKL support." 
${AVX_FOUND}) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) -option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) +option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index 3f2dcde3e9..dec761399f 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -139,7 +139,7 @@ struct TestBroadcastOpHandle { PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); f::Tensor result_tensor; - f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor, true); float* ct = result_tensor.mutable_data(cpu_place); for (int64_t i = 0; i < f::product(kDims); ++i) { @@ -185,7 +185,7 @@ struct TestBroadcastOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor); + f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor, true); float* ct = result_tensor.data(); for (int64_t i = 0; i < f::product(kDims); ++i) { diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index c17aabee53..c557875013 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -194,7 +194,8 @@ struct TestReduceOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor, + true); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { @@ -239,7 +240,8 @@ struct TestReduceOpHandle { auto &rt = out_var->Get(); f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor); + f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor, + true); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { From bfe08446cb399339c7c9f6056edc299de30f763e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 26 Apr 2018 09:34:56 +0000 Subject: [PATCH 52/72] fix error --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ec4eda711..23bbe829ac 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,7 @@ option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF) option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND}) option(WITH_MKL "Compile PaddlePaddle with MKL support." 
${AVX_FOUND}) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) -option(WITH_TESTING "Compile PaddlePaddle with unit testing" ON) +option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) From c816121d11f7aed2939c5b859423883ce8bab050 Mon Sep 17 00:00:00 2001 From: baiyf Date: Thu, 26 Apr 2018 19:02:22 +0800 Subject: [PATCH 53/72] optimized iou_similarity_op (#10231) --- paddle/fluid/operators/iou_similarity_op.h | 24 ++++++++------- .../tests/unittests/test_iou_similarity_op.py | 30 ++++++++++++++----- 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h index c76448c736..9f193ebc59 100644 --- a/paddle/fluid/operators/iou_similarity_op.h +++ b/paddle/fluid/operators/iou_similarity_op.h @@ -41,22 +41,24 @@ struct IOUSimilarityFunctor { IOUSimilarityFunctor(const T* x, const T* y, T* z, int cols) : x_(x), y_(y), z_(z), cols_(static_cast(cols)) {} - inline HOSTDEVICE void operator()(size_t row_id) const { + inline HOSTDEVICE void operator()(size_t tid) const { + size_t row_id = tid / cols_; + size_t col_id = tid % cols_; + T x_min1 = x_[row_id * 4]; T y_min1 = x_[row_id * 4 + 1]; T x_max1 = x_[row_id * 4 + 2]; T y_max1 = x_[row_id * 4 + 3]; - for (size_t i = 0; i < cols_; ++i) { - T x_min2 = y_[i * 4]; - T y_min2 = y_[i * 4 + 1]; - T x_max2 = y_[i * 4 + 2]; - T y_max2 = y_[i * 4 + 3]; - T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, - x_max2, y_max2); + T x_min2 = y_[col_id * 4]; + T y_min2 = y_[col_id * 4 + 1]; + T x_max2 = y_[col_id * 4 + 2]; + T y_max2 = y_[col_id * 4 + 3]; + + T sim = IOUSimilarity(x_min1, y_min1, x_max1, y_max1, x_min2, y_min2, + x_max2, y_max2); - z_[row_id * cols_ + i] = sim; - } + z_[row_id * cols_ + col_id] = sim; } const T* x_; const T* y_; @@ -81,7 +83,7 @@ class IOUSimilarityKernel : public framework::OpKernel { out->mutable_data(ctx.GetPlace()), y_n); platform::ForRange for_range( - static_cast(ctx.device_context()), x_n); + static_cast(ctx.device_context()), x_n * y_n); for_range(functor); } }; // namespace operators diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index e33436b63c..8f62ac20a5 100644 --- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -14,6 +14,7 @@ import unittest import numpy as np +import numpy.random as random import sys import math from op_test import OpTest @@ -25,14 +26,27 @@ class TestIOUSimilarityOp(OpTest): def setUp(self): self.op_type = "iou_similarity" - self.boxes1 = np.array( - [[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]).astype('float32') - self.boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], - [0.0, 0.0, 20.0, 20.0]]).astype('float32') - self.output = np.array( - [[2.0 / 16.0, 0, 6.0 / 400.0], - [1.0 / 16.0, 0.0, 5.0 / 400.0]]).astype('float32') - + self.boxes1 = random.rand(2, 4).astype('float32') + self.boxes2 = random.rand(3, 4).astype('float32') + self.output = random.rand(2, 3).astype('float32') + for row in range(self.boxes1.shape[0]): + for col in range(self.boxes2.shape[0]): + xmin1, ymin1, xmax1, ymax1 = self.boxes1[row] + xmin2, ymin2, xmax2, ymax2 = self.boxes2[col] + area1 = 
(ymax1 - ymin1) * (xmax1 - xmin1) + area2 = (ymax2 - ymin2) * (xmax2 - xmin2) + inter_xmax = min(xmax1, xmax2) + inter_ymax = min(ymax1, ymax2) + inter_xmin = max(xmin1, xmin2) + inter_ymin = max(ymin1, ymin2) + inter_height = inter_ymax - inter_ymin + inter_width = inter_xmax - inter_xmin + inter_height = max(inter_height, 0) + inter_width = max(inter_width, 0) + inter_area = inter_width * inter_height + union_area = area1 + area2 - inter_area + sim_score = inter_area / union_area + self.output[row, col] = sim_score self.inputs = {'X': self.boxes1, 'Y': self.boxes2} self.outputs = {'Out': self.output} From 8efc3082f6c41747b02f1aa975926a0d2b7db68d Mon Sep 17 00:00:00 2001 From: weixing Date: Fri, 27 Apr 2018 01:05:11 +0800 Subject: [PATCH 54/72] Change paddle.v2.reader to paddle.reader (#10226) --- doc/fluid/api/data/data_reader.rst | 4 ++-- doc/v2/api/data/data_reader.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/fluid/api/data/data_reader.rst b/doc/fluid/api/data/data_reader.rst index d7c896a627..1a35d0bbc8 100644 --- a/doc/fluid/api/data/data_reader.rst +++ b/doc/fluid/api/data/data_reader.rst @@ -56,11 +56,11 @@ DataFeeder Reader ====== -.. automodule:: paddle.v2.reader +.. automodule:: paddle.reader :members: :noindex: -.. automodule:: paddle.v2.reader.creator +.. automodule:: paddle.reader.creator :members: :noindex: diff --git a/doc/v2/api/data/data_reader.rst b/doc/v2/api/data/data_reader.rst index d7c896a627..1a35d0bbc8 100644 --- a/doc/v2/api/data/data_reader.rst +++ b/doc/v2/api/data/data_reader.rst @@ -56,11 +56,11 @@ DataFeeder Reader ====== -.. automodule:: paddle.v2.reader +.. automodule:: paddle.reader :members: :noindex: -.. automodule:: paddle.v2.reader.creator +.. automodule:: paddle.reader.creator :members: :noindex: From 98cf74c2556bb9354176344667581f403cef7b55 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 26 Apr 2018 10:40:53 -0700 Subject: [PATCH 55/72] add float16 inference content (#10210) --- doc/fluid/design/data_type/float16.md | 92 +++++++++++++++++++++++++-- 1 file changed, 85 insertions(+), 7 deletions(-) diff --git a/doc/fluid/design/data_type/float16.md b/doc/fluid/design/data_type/float16.md index 1ea95ed6b5..844d2aafcf 100644 --- a/doc/fluid/design/data_type/float16.md +++ b/doc/fluid/design/data_type/float16.md @@ -3,7 +3,7 @@ ## Why float16 Half precision (float16) is a binary floating-point format that occupies 16 bits in memory. float16 is half the size of traditional 32-bit single precision format (float) and has lower precision and smaller range. -When high precision computation is not required, using float16 data type could potentially +When high precision computation is not required (which is usually the case at least in the deep learning inference stage), using float16 data type could potentially - reduce storage space, memory bandwidth, and power usages; - increase the chance of data fitting into a smaller cache of lower latency; @@ -12,7 +12,7 @@ When high precision computation is not required, using float16 data type could p ## Survey of current float16 support A brief survey of float16 support on different compilers, hardwares, and libraries can be found below. Interested readers can refer to [link1](https://github.com/PaddlePaddle/Paddle/issues/4853) and [link2](https://github.com/Xreki/Xreki.github.io/blob/master/multi_data_types_in_dl_framework/ppt/float16_and_quantized_type.md) for more info. 
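As a concrete illustration of the 16-bit format (IEEE binary16: 1 sign bit, 5 exponent bits with bias 15, 10 mantissa bits), here is a minimal, self-contained C++ sketch of a truncating float32 to float16 bit conversion. It is not the Fluid implementation; the helper name is invented for illustration, and rounding, NaN and denormal inputs are deliberately ignored, all of which a real conversion routine has to handle.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Truncating fp32 -> fp16 conversion: 1 sign bit, 5 exponent bits (bias 15),
// 10 mantissa bits. Overflow saturates to +/-inf, underflow flushes to zero.
uint16_t FloatToHalfBits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  uint32_t sign = (bits >> 16) & 0x8000u;
  int32_t exp = static_cast<int32_t>((bits >> 23) & 0xFFu) - 127 + 15;
  uint32_t mant = (bits >> 13) & 0x3FFu;  // keep the top 10 mantissa bits
  if (exp <= 0) return static_cast<uint16_t>(sign);             // flush to zero
  if (exp >= 31) return static_cast<uint16_t>(sign | 0x7C00u);  // +/- inf
  return static_cast<uint16_t>(sign | (static_cast<uint32_t>(exp) << 10) | mant);
}

int main() {
  float x = 3.140625f;  // exactly representable in fp16
  std::cout << std::hex << FloatToHalfBits(x) << "\n";  // prints 4248
  std::cout << std::dec << "fp32 bytes: " << sizeof(float)
            << ", fp16 bytes: " << sizeof(uint16_t) << "\n";  // 4 vs 2
  return 0;
}
```

The widening direction (fp16 back to fp32) follows the same bit-field logic in reverse; as described later in this document, the actual conversion helpers use hardware intrinsics where available and fall back to software emulation otherwise.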
-The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernel. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. +The goal of float16 is to serve as a key for the executor to find and run the correct version of compute method specialized for float16 in operator kernels. It should be compatible with various natively supported float16 implementations including `__half` for cuda, `float16_t` for ARM, and `Eigen::half` for Eigen to make writing customized float16 kernels easier. ### Compiler - nvcc supports `__half` data type after CUDA 7.5. @@ -95,11 +95,89 @@ float half_to_float(float16 h); ``` which provides one-to-one conversion between float32 and float16. These twos functions will do different conversion routines based on the current hardware. CUDA/ARM instrinsics will be used when the corresonding hardware is available. If the hardware or compiler level does not support float32 to float16 conversion, software emulation will be performed to do the conversion. -## To do -After float16 class is available, some of the future items are below: +## float16 inference +In Fluid, a neural network is represented as a protobuf message called [ProgramDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md), whose Python wrapper is a [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#program). The basic structure of a program is some nested [blocks](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#block), where each block consists of some [variable](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#variable) definitions and a sequence of [operators](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#operator). An [executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md) will run a given program desc by executing the sequence of operators in the entrance block of the program one by one. -- Update pybind/tensor_py.h to bind c++ float16 with numpy float16. +### Operator level requirement +Each operator has many kernels for different data types, devices, and library types. The operator will select the appropriate kernel to run based on, among other things, the data type of the input variables. By default, every Fluid operator has a float data type kernel that takes float variables as input and generates float output. -- Modify `GetKernelType()` method in `framework/operator.h` to make it compatible with float16. +This means that if we provide float input to the first operator in a program, then each opeartor will use float kernel to compute float output and send it as input to the next operator to trigger the float kernel. Overall, the program will run in float mode and give us a final output of float data type. -- Create a type-casting operator that can convert the data type in tensor between float16 and other types. +The same principle applies if we want a program to run in float16 mode. 
We provide input variable of float16 data type to the first operator, and then one by one, each operator in the program will run the float16 kernel (provided that each operator in this program has float16 kernels registered) until we finally obtain a float16 output variable. + +So the preliminary requirement for float16 inference is to add float16 kernel to operators that are needed in a specific kind of program. For example, float16 inference on an image classification neural network like Vgg or Resnet, typically requires the following operators to have float16 kernels: convolution, pooling, multiplication, addition, batch norm, dropout, relu, and softmax. Please refer to [new_op_en](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/new_op_en.md) for details of how to add new kernels to an operator. + +### Variable level requirement +Operators including convolution and multiplication (used in fully-connected layers) takes as input not only the variables generated by the preceding operators but also [parameter](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/python_api.md#parameter) variables, which contains the trained weights to apply to the input data. These weights are obtained in the Fluid training process and are by default of float data type. + +When these operators are running in float16 mode, the float16 kernel requires those parameter variables to contain weights of Fluid float16 data type. Thus, we need a convenient way to convert the original float weights to float16 weights. + +In Fluid, we use tensor to hold actual data for a variable on the c++ end. [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h) is used to bind c++ tensors of certain data type with numpy array of the correponding numpy data type on the Python end. Each common c++ built-in data type has a corresponding numpy data type of the same name. However, since there is no built-in float16 type in c++, we cannot directly bind numpy float16 data type with the Fluid float16 class. Since both Fluid float16 and numpy float16 use uint16 as the internal data storage type, we use c++ built-in type `uint16_t` and the corresponding numpy uint16 data type to bridge the gap via [Pybind](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/pybind/tensor_py.h). + +The following code demonstrates how to do the tensor conversion. +```Python +# var is the variable of float weights +# tensor is a numpy array of data copied from the tensor data in var +# fp16_var is the variable that will contain float16 weights converted from var +tensor = numpy.array(var.get_tensor()) +fp16_tensor = fp16_var.get_tensor() + +# After the original tensor data is converted to numpy float16 data type, +# view(numpy.uint16) is used so that the internal memory of the numpy array +# will be reinterpreted to be of uint16 data type, which is binded to +# Fluid float16 class via pybind with the help of uint16_t built-in c++ type +fp16_tensor.set(tensor.astype(numpy.float16).view(numpy.uint16), GPUPlace) +``` + +### Consistent API requirement +The basic inference in float16 mode requires users to feed input and obtain output both of float16 data type. However, in this way, the inference APIs are not consistent between float16 mode and float mode, and users may find it confusing and diffcult to use float16 inference since they need to do extra steps to provide float16 input data and convert float16 output data back to float. 
To have consistent API for different inference modes, we need to transpile the program desc in some way so that we can run float16 inference by feeding and fetching variables of float data type. + +This problem can be solved by introducing a type-casting operator which takes an input variable of certain data type, cast it to another specified data type, and put the casted data into the output variable. Insert cast operator where needed can make a program internally run in float16 mode. + +### float16 transpiler +Put all the above requirements in mind, we designed a float16 inference transpiler that can tranpile a float32 mode inference program desc to a float16 mode one. + +Given a float inference program and the corresponding variables of float32 weights in the [scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/scope.md), +this transpiler mainly does the following modifications: + +1. Insert cast operators at the beginning of the program so that the input float data will be converted to float16 data type before feeding to subsequent operators to invoke the float16 kernel. + +2. Insert cast operators at the end of the program so that the output float16 data will be converted back to float data type before users obtain the result. + +3. For each parameter variable of float weights, create in the scope a corresponding variable of float16 weights which are converted from the corresponding float weights and add this new float16 variable to the program. + +4. Update the operator information in the program so that each relevant operator use the newly created float16 variable instead of its float counterpart. + +Below is an example of usage: +```Python +# Get the float inference program +[float_inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + +# Prepare the float input data +tensor_img = numpy.random.rand(1, 3, 32, 32).astype(numpy.float32) + +# Running inference_program in float mode +float_results = exe.run(float_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + +# Use float16 transpiler to speedup +float16_inference_program = float_inference_program.clone() +t = fluid.InferenceTranspiler() +t.float16_transpile(float16_inference_program, GPUPlace) + +# Running +float16_results = exe.run(float16_inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) +``` + +As we can see from the example above, users can simply use the `float16_transpile` method provided by the infernece transpiler class on an existing float inference program to run inference in float16 mode. + +### Speedup on GPU +Currently, Fluid inference in float16 mode is only supported on Nvidia GPU device. There is no motivation to support float16 inference on non-ARM CPUs because float16 is not natively supported there and float16 calculation will only be slower than its float counterpart. + +Nvidia started to support its native float16 data type (which has the same internal memory representation as Fluid float16 class) on CUDA 7.5. Moreover, float16 speedups on common computational intensive tasks including GEMM (general matrix-matrix multiplication) and convolution are supported since cublas 7.5 and cuDNN 5.0. 
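To make the GEMM point concrete, a float16 matrix multiply through cuBLAS looks roughly like the sketch below. This is an illustration rather than Fluid code: it assumes a recent CUDA toolkit in which `__half`, host-callable `__float2half`, and `cublasHgemm` are all available, and it omits error checking and memory management.

```cpp
#include <cublas_v2.h>
#include <cuda_fp16.h>

// C(m x n) = A(m x k) * B(k x n), all fp16, column-major as cuBLAS expects.
// d_a, d_b, d_c are device pointers that the caller has already filled.
void HalfGemm(const __half* d_a, const __half* d_b, __half* d_c,
              int m, int n, int k) {
  cublasHandle_t handle;
  cublasCreate(&handle);
  __half alpha = __float2half(1.0f);
  __half beta = __float2half(0.0f);
  cublasHgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, &alpha, d_a, m, d_b,
              k, &beta, d_c, m);
  cublasDestroy(handle);
}
```

Whether this actually outperforms the float32 path depends on the GPU generation, as the next paragraph notes.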
+ +Recently, the introduction of [tensor core](https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/) in volta architecture GPUs and the support of tensor core calculation in CUDA 9.0 and cuDNN 7.0 make float16 truly superior to float in certain deep learning applications. Please refer to this [benchmark report](https://github.com/kexinzhao/Paddle_benchmark/blob/master/float16_benchmark.md) for more details. From b1334259158cee17997341b75bfb6668e02533bf Mon Sep 17 00:00:00 2001 From: Xi Chen Date: Thu, 26 Apr 2018 15:28:21 -0700 Subject: [PATCH 56/72] add grad_to_block_id --- python/paddle/fluid/distribute_transpiler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/distribute_transpiler.py b/python/paddle/fluid/distribute_transpiler.py index d17475cd28..44542749f8 100644 --- a/python/paddle/fluid/distribute_transpiler.py +++ b/python/paddle/fluid/distribute_transpiler.py @@ -444,7 +444,8 @@ class DistributeTranspiler: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) for glb_op in global_ops: - __append_optimize_op__(glb_op, opt_state_block) + __append_optimize_op__(glb_op, opt_state_block, + grad_to_block_id) # NOT USED: single block version: # From 83b1a8f6bf295fefcf44949d17b538de47eb522e Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Thu, 26 Apr 2018 15:29:33 -0700 Subject: [PATCH 57/72] Pending more CPPLint errors in fluid/operators/math (#10243) * Fix CPPLint issue in test_engine * Fix CPPLint errors in operators/math * Fix compilation --- paddle/fluid/inference/engine.h | 1 - paddle/fluid/inference/tensorrt/engine.h | 6 ++++-- paddle/fluid/inference/tensorrt/test_engine.cc | 6 +++--- paddle/fluid/operators/concurrency/channel_util.cc | 2 +- paddle/fluid/operators/math/context_project.h | 2 ++ paddle/fluid/operators/math/im2col.cc | 1 + paddle/fluid/operators/math/im2col.cu | 2 ++ paddle/fluid/operators/math/im2col.h | 1 + paddle/fluid/operators/math/math_function_impl.h | 1 + paddle/fluid/operators/math/sequence_padding_test.cc | 3 ++- paddle/fluid/operators/math/sequence_pooling.cu | 1 + paddle/fluid/operators/math/sequence_pooling.h | 1 + paddle/fluid/operators/math/vol2col.h | 1 + paddle/fluid/operators/reader/reader_op_registry.h | 2 ++ 14 files changed, 22 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h index 0633c052e4..6b0ac92fa9 100644 --- a/paddle/fluid/inference/engine.h +++ b/paddle/fluid/inference/engine.h @@ -46,7 +46,6 @@ class EngineBase { virtual void Execute(int batch_size) = 0; virtual ~EngineBase() {} - }; // class EngineBase } // namespace inference diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index ff853455b8..82d8c3df4e 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -16,7 +16,9 @@ limitations under the License. */ #include #include +#include #include +#include #include "paddle/fluid/inference/engine.h" #include "paddle/fluid/inference/tensorrt/helper.h" @@ -56,9 +58,9 @@ class TensorRTEngine : public EngineBase { virtual ~TensorRTEngine(); // TODO(Superjomn) implement it later when graph segmentation is supported. - virtual void Build(const DescType& paddle_model) override; + void Build(const DescType& paddle_model) override; - virtual void Execute(int batch_size) override; + void Execute(int batch_size) override; // Initialize the inference network, so that TensorRT layers can add to this // network. 
diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc index f3dbdf11f2..c6e1c71cdc 100644 --- a/paddle/fluid/inference/tensorrt/test_engine.cc +++ b/paddle/fluid/inference/tensorrt/test_engine.cc @@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/inference/tensorrt/engine.h" - #include #include #include #include +#include "paddle/fluid/inference/tensorrt/engine.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { @@ -65,7 +64,8 @@ TEST_F(TensorRTEngineTest, add_layer) { // fill in real data float x_v = 1234; - engine_->SetInputFromCPU("x", (void*)&x_v, 1 * sizeof(float)); + engine_->SetInputFromCPU("x", reinterpret_cast(&x_v), + 1 * sizeof(float)); LOG(INFO) << "to execute"; engine_->Execute(1); diff --git a/paddle/fluid/operators/concurrency/channel_util.cc b/paddle/fluid/operators/concurrency/channel_util.cc index 246c99489c..fba4abf189 100644 --- a/paddle/fluid/operators/concurrency/channel_util.cc +++ b/paddle/fluid/operators/concurrency/channel_util.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "channel_util.h" +#include "paddle/fluid/operators/concurrency/channel_util.h" #include "paddle/fluid/framework/var_type.h" namespace poc = paddle::operators::concurrency; diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 4da94383af..027a019a28 100644 --- a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -14,6 +14,8 @@ limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc index 123e10586f..336d6febc2 100644 --- a/paddle/fluid/operators/math/im2col.cc +++ b/paddle/fluid/operators/math/im2col.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index f41c78140f..1268e21e06 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h index 451ec9d534..26d94e0f2e 100644 --- a/paddle/fluid/operators/math/im2col.h +++ b/paddle/fluid/operators/math/im2col.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index f9d4e45324..b9bd49d77d 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/operators/math/math_function.h" diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index bece46e753..e3d6214485 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/sequence_padding.h" #include +#include template void TestSequencePadding(const paddle::framework::LoD& lod, @@ -75,7 +76,7 @@ void TestSequencePadding(const paddle::framework::LoD& lod, delete place; delete context; -}; +} TEST(Seq2BatchPadding, CPU) { paddle::framework::LoD lod1; diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index 1935364da3..36f6402396 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/sequence_pooling.h" #include "paddle/fluid/platform/cuda_helper.h" diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index 38e7802229..8dcbee65d0 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h index dbc2ed7a69..5f59de8f02 100644 --- a/paddle/fluid/operators/math/vol2col.h +++ b/paddle/fluid/operators/math/vol2col.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/platform/device_context.h" diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index 929d32ad8b..ec25f55ef5 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -14,6 +14,8 @@ #pragma once +#include +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" From 0ecc6fa8f35d9bc15d605db82551ead8f502d523 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 26 Apr 2018 19:00:28 -0700 Subject: [PATCH 58/72] Add float16 transpiler and image classification example (#10109) * add float16 transpiler * fix feed fetch target names mismatch * fix cast op input change issue * fix program desc flush error * fix inconsistent var names in block desc bug * code clean up * add float16 infernce C++ example and fix prune bug --- .../test_inference_image_classification.cc | 16 ++ python/paddle/fluid/framework.py | 25 ++- python/paddle/fluid/inference_transpiler.py | 208 +++++++++++++++++- python/paddle/fluid/io.py | 2 +- .../tests/book/test_image_classification.py | 20 ++ 5 files changed, 261 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index 1e6555bb02..1a685b9e2e 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -62,5 +62,21 @@ TEST(inference, image_classification) { LOG(INFO) << output2.dims(); CheckError(output1, output2); + + // float16 inference requires cuda GPUs with >= 5.3 compute capability + if (paddle::platform::GetCUDAComputeCapability(0) >= 53) { + paddle::framework::LoDTensor output3; + std::vector cpu_fetchs3; + cpu_fetchs3.push_back(&output3); + + LOG(INFO) << "--- GPU Runs in float16 mode: ---"; + std::string fp16_dirname = dirname; + fp16_dirname.replace(fp16_dirname.find("book/"), + std::string("book/").size(), "book/float16_"); + TestInference( + fp16_dirname, cpu_feeds, cpu_fetchs3, FLAGS_repeat); + + CheckError(output2, output3); + } #endif } diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 340882ea9e..53486ecffc 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1070,16 +1070,25 @@ class Program(object): for t in targets: if not isinstance(t, Operator): if isinstance(t, Variable): - if t.op is None: - global_block = self.global_block() - for op in global_block.ops: - if t.name in op.output_arg_names: - t.op = op - break + # After transpiler processing, the op that output this + # variable maybe has been changed, so t.op is not reliable + # and we need to find the current op that generate this + # variable here. 
+ t.op = None + global_block = self.global_block() + for idx, op in enumerate(global_block.ops): + if t.name in op.output_arg_names: + t.op = op + break + t = t.op + if t is None: + raise ValueError( + "The target variable must have an " + "associated operator that generates it.") else: - raise ValueError(("All targets of prune() can only be " - "Variable or Operator.")) + raise ValueError("All targets of prune() can only be " + "Variable or Operator.") targets_idx.append([t.block.idx, t.idx]) res = Program() diff --git a/python/paddle/fluid/inference_transpiler.py b/python/paddle/fluid/inference_transpiler.py index 39b01610f9..f4ad717b9e 100644 --- a/python/paddle/fluid/inference_transpiler.py +++ b/python/paddle/fluid/inference_transpiler.py @@ -121,7 +121,60 @@ class InferenceTranspiler: # And a better solution will be considered later. program = program.clone() + def float16_transpile(self, program, place, scope=None): + ''' + Transpile the program desc and cast the weights to float16 data type to + enable float16 inference. + + Since the operator in a program desc will automatically choose the + right compute kernel to run based on the data type of the input tensor. + We actually don't need to change the program desc to run in float16 mode. + + However, in this way, users who are used to feeding and fetching tensors + of float32 data type when running typical inference may find it confusing + and difficult to run inference in float16 mode as they need to convert + input data to float16 dtype and then convert the results back to float32 + dtype to match the rest of code. + + So this function appends cast ops to the program desc where necessary so + that users are able to run inference in float16 mode while providing input + tensor (feed_holder) of float data type and obtaining output tensor + (fetch_holder) of float data type. + + Moreover, it is desired that when we have the scope and program desc to run + inference in float32 mode, we can use a single API to do the necessary + modification and then user can run float16 inference on the fly. To make + this happen, this function also create new parameters in the scope to have the + converted float16 weights and change the operators in program desc to use + these new parameters. + + :param program: program to transpile + :type program: Program + :param place: inference place + :type place: Place + :param scope: inference scope + :type scope: Scope + ''' + if scope is None: + scope = global_scope() + + self.scope = scope + self.place = place + self.block = program.block(0) + self.input_map = {} # store the input names should be adjusted + + self._modify_feed_fetch() + self._convert_param_to_float16() + self._adjust_input(skip=True) + self._remove_unused_var() + + # TODO(luotao): use clone() method to flush the program.desc in force, + # since some large program.desc will not be flushed immediately. + # And a better solution will be considered later. + program = program.clone() + # ====================== private transpiler functions ===================== + def _insert_bias_op(self, index, current_op, bn_op): ''' Construct elementwise_add operator for adding bias @@ -216,9 +269,27 @@ class InferenceTranspiler: # collect the renamed input self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0] - def _adjust_input(self): + def _adjust_input(self, skip=False): + ''' + Change the input variable name in operators. 
+ + When we are in the process of modifying a program desc, we usually + replace some variables with some other variables, where we create + a dictionary input_map to record the one-to-one correspondence + between each old variable and the new one. + + After that, this function will search all the operators that use the + old variables and change the info in op to use the new variables. There + maybe some exceptions to this rule when we are using the float16 transpiler + and insert cast ops to cast float32 variable to float16 one. After we + insert the cast op to cast var_1 to var_1_fp16, we don't want to change + the input of cast op to var_1_fp16 after using this function. + ''' + skip_ops = {"cast"} for i in range(len(self.block.ops)): current_op = self.block.ops[i] + if skip and current_op.type in skip_ops: + continue for input_arg in current_op.input_arg_names: if input_arg in self.input_map: current_op.rename_input(input_arg, @@ -238,3 +309,138 @@ class InferenceTranspiler: for var in self.block.vars.keys(): if var not in args: self.block.remove_var(var) + + def _modify_feed_fetch(self): + ''' + Modify feed fetch op/vars for float16 inference. + + For each feed op: + feed_op->feed_target_var + + Change it to: + feed_op->feed_target_var->cast_op(from other dtype to float16)->tmp_var + + For each fetch op: + fetch_target_var->fetch_op + + Change it to: + tmp_var->cast_op(from float16 to other dtype)->fetch_target_var->fetch_op + + :return: None + ''' + + def find_op(var): + # It is possible that var.op is not up to date after some + # modifications to program desc. Here we force to make it up to date. + var.op = None + for op in self.block.ops: + if var.name in op.output_arg_names: + var.op = op + break + + if var.op is None: + raise ValueError("The target variable must have an " + "associated operator that generates it.") + + i = 0 + while i < len(self.block.ops): + cur_op = self.block.ops[i] + if cur_op.type == "feed": + var_name = cur_op.output("Out")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + self.block.insert_op( + i + 1, + type="cast", + inputs={"X": var}, + outputs={"Out": tmp_var}, + attrs={ + 'in_dtype': int(var.dtype), + 'out_dtype': int(tmp_var.dtype) + }) + self.input_map[var_name] = tmp_var_name + i = i + 1 + elif cur_op.type == "fetch": + var_name = cur_op.input("X")[0] + tmp_var_name = var_name + ".fp16" + var = self.block.vars[var_name] + tmp_var = self.block.create_var( + name=tmp_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape, + persistable=var.persistable) + find_op(var) + var.op.rename_output(var_name, tmp_var_name) + self.block.insert_op( + i, + type="cast", + inputs={"X": tmp_var}, + outputs={"Out": var}, + attrs={ + 'in_dtype': int(tmp_var.dtype), + 'out_dtype': int(var.dtype) + }) + i = i + 1 + i = i + 1 + + def _convert_param_to_float16(self): + def _get_no_fp16_conversion_var_names(): + ''' + Get the set of input variable names that shouldn't be converted to float16. + + When we want to run inference in float16 mode, most parameters need to be + firstly converted to float16. However, there are some parameters that + shouldn't be converted to float16 because the corresponding operator + requires float32 parameters even in float16 mode (when the input data is + of float16 data type). 
Currently, the only operator that has this exclusion + is the batch norm op. + + :return: set of input variable names + :type var_names: set + ''' + op_names = {'batch_norm'} + var_names = [] + for op in self.block.ops: + if op.type in op_names: + var_names += op.input_arg_names + return set(var_names) + + def _should_be_converted(var): + return var.persistable and \ + var.name not in self.no_conversion_vars and \ + var.type != core.VarDesc.VarType.FEED_MINIBATCH and \ + var.type != core.VarDesc.VarType.FETCH_LIST + + self.no_conversion_vars = _get_no_fp16_conversion_var_names() + conversion_var_list = filter(_should_be_converted, + self.block.vars.values()) + for var in conversion_var_list: + fp16_var_name = var.name + ".fp16" + fp16_var = self.block.create_parameter( + name=fp16_var_name.encode('ascii'), + type=var.type, + dtype=core.VarDesc.VarType.FP16, + shape=var.shape) + + # cast the data in the tensor of the original var to float16 + # data type and store it in the tensor of the new float16 var + self.scope.var(fp16_var_name) + fp16_tensor = self.scope.find_var(fp16_var_name).get_tensor() + tensor = np.array(self.scope.find_var(var.name).get_tensor()) + # After the old tensor data is converted to np.float16, view(np.uint16) + # is used so that the internal memory of the numpy array will be + # reinterpreted to be of np.uint16 data type, which is binded to fluid + # float16 data type via the help of pybind in tensor_py.h. + fp16_tensor.set( + tensor.astype(np.float16).view(np.uint16), self.place) + + # old var will be replaced by the fp16 var in program desc + self.input_map[var.name] = fp16_var_name + self.block.remove_var(var.name) diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index f7f1ca2598..08b8a878b6 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -336,7 +336,7 @@ def save_inference_model(dirname, if main_program is None: main_program = default_main_program() - copy_program = main_program + copy_program = main_program.clone() if not os.path.isdir(dirname): os.makedirs(dirname) diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index db96c82ce2..09f994c370 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -252,6 +252,26 @@ def infer(use_cuda, save_dirname=None): fetch_targets, exe, inference_transpiler_program) + if use_cuda and fluid.core.is_float16_supported(place): + # Use float16_transpiler to speedup + fp16_transpiler_program = inference_transpiler_program.clone() + t.float16_transpile(fp16_transpiler_program, place) + + fp16_results = exe.run(fp16_transpiler_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + + assert len(results[0]) == len(fp16_results[0]) + for i in range(len(results[0])): + np.testing.assert_almost_equal( + results[0][i], fp16_results[0][i], decimal=2) + + print("float16 infer results: ", fp16_results[0]) + + fluid.io.save_inference_model("float16_" + save_dirname, + feed_target_names, fetch_targets, exe, + fp16_transpiler_program) + def main(net_type, use_cuda, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): From e498e1fc56c19c9e0c48729eee5d3491137a9c3d Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 27 Apr 2018 04:04:47 +0200 Subject: [PATCH 59/72] Adam operator optimized with Eigen (#10229) * Some changes for Adam profiling * Adam optimization: initial Eigen 
optimization * Eigen Adam: flavour of adam can be chosen * Eigen Adam used for CPU by default. Plain Adam used for GPU * Eigen Adam: missing call to the Eigen functor added * Eigen Adam: revert changes in benchmarks * Eigen Adam: typo corrected --- paddle/fluid/operators/adam_op.h | 117 +++++++++++++++++++++++++++---- 1 file changed, 103 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h index b332b67163..f82ff47b52 100644 --- a/paddle/fluid/operators/adam_op.h +++ b/paddle/fluid/operators/adam_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include // for sqrt in CPU and CUDA +#include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" @@ -24,8 +25,14 @@ namespace operators { namespace scatter = paddle::operators::math::scatter; +struct GPUAdam; +struct CPUAdam; + +template +struct AdamFunctor; + template -struct AdamFunctor { +struct AdamFunctor { T beta1_; T beta2_; T epsilon_; @@ -71,6 +78,7 @@ struct AdamFunctor { // Calculation lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -82,6 +90,71 @@ struct AdamFunctor { } }; +template +struct AdamFunctor { + T beta1_; + T beta2_; + T epsilon_; + + const T* beta1_pow_; + const T* beta2_pow_; + const T* moment1_; + T* moment1_out_; + const T* moment2_; + T* moment2_out_; + const T* lr_; + const T* grad_; + const T* param_; + T* param_out_; + + AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow, + const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2, + T* mom2_out, const T* lr, const T* grad, const T* param, + T* param_out) + : beta1_(beta1), + beta2_(beta2), + epsilon_(epsilon), + beta1_pow_(beta1_pow), + beta2_pow_(beta2_pow), + moment1_(mom1), + moment1_out_(mom1_out), + moment2_(mom2), + moment2_out_(mom2_out), + lr_(lr), + grad_(grad), + param_(param), + param_out_(param_out) {} + + void operator()(size_t numel) const { + Eigen::Map> g{ + grad_, static_cast(numel)}; + Eigen::Map> mom1{ + moment1_, static_cast(numel)}; + Eigen::Map> mom2{ + moment2_, static_cast(numel)}; + Eigen::Map> param{ + param_, static_cast(numel)}; + + Eigen::Map> param_out{ + param_out_, static_cast(numel)}; + Eigen::Map> moment1_out{ + moment1_out_, static_cast(numel)}; + Eigen::Map> moment2_out{ + moment2_out_, static_cast(numel)}; + + T lr = *lr_; + T beta1_pow = *beta1_pow_; + T beta2_pow = *beta2_pow_; + + // Calculation + lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + + moment1_out = beta1_ * mom1 + (1 - beta1_) * g; + moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g; + param_out = param - lr * (moment1_out / (moment2_out.sqrt() + epsilon_)); + } +}; + template struct SparseAdamFunctor { T beta1_; @@ -134,6 +207,7 @@ struct SparseAdamFunctor { T p = param_[rows_[i] * row_numel_ + j]; lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow); + mom1 = beta1_ * mom1 + (1 - beta1_) * g; mom2 = beta2_ * mom2 + (1 - beta2_) * g * g; p -= lr * (mom1 / (sqrt(mom2) + epsilon_)); @@ -177,19 +251,34 @@ class AdamOpKernel : public framework::OpKernel { if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); - AdamFunctor functor( - beta1, beta2, epsilon, beta1_pow.template data(), - beta2_pow.template data(), mom1.template data(), - mom1_out.template mutable_data(ctx.GetPlace()), - mom2.template data(), - 
mom2_out.template mutable_data(ctx.GetPlace()), - lr.template data(), grad.template data(), - param.template data(), - param_out.template mutable_data(ctx.GetPlace())); - platform::ForRange for_range( - static_cast(ctx.device_context()), - param.numel()); - for_range(functor); + + if (platform::is_cpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + functor(param.numel()); + } else if (platform::is_gpu_place(ctx.GetPlace())) { + AdamFunctor functor( + beta1, beta2, epsilon, beta1_pow.template data(), + beta2_pow.template data(), mom1.template data(), + mom1_out.template mutable_data(ctx.GetPlace()), + mom2.template data(), + mom2_out.template mutable_data(ctx.GetPlace()), + lr.template data(), grad.template data(), + param.template data(), + param_out.template mutable_data(ctx.GetPlace())); + + platform::ForRange for_range( + static_cast(ctx.device_context()), + param.numel()); + for_range(functor); + } } else if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); From 4a5bfa89c342771688f5d62dc2156df85933af50 Mon Sep 17 00:00:00 2001 From: dyning Date: Fri, 27 Apr 2018 10:36:15 +0800 Subject: [PATCH 60/72] Modify RoI pooling op to use LoDTensor and expose it into Python API (#10208) * modify roi pool with lod and expose ROI Pooling into Python API * make lod code brief * make doc more clearly * make doc more clearly --- doc/fluid/api/layers.rst | 9 +++ paddle/fluid/operators/roi_pool_op.cc | 17 ++-- paddle/fluid/operators/roi_pool_op.cu | 79 ++++++++++++++----- paddle/fluid/operators/roi_pool_op.h | 63 ++++++++++----- python/paddle/fluid/layers/nn.py | 51 ++++++++++++ .../fluid/tests/unittests/test_layers.py | 10 +++ .../fluid/tests/unittests/test_roi_pool_op.py | 37 +++++---- 7 files changed, 201 insertions(+), 65 deletions(-) diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index 3790f09c84..ff3c9346a2 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -479,6 +479,13 @@ label_smooth .. autofunction:: paddle.fluid.layers.label_smooth :noindex: +roi_pool +--------- + +.. autofunction:: paddle.fluid.layers.roi_pool + :noindex: + + ops === @@ -820,3 +827,5 @@ topk .. 
autofunction:: paddle.fluid.layers.topk :noindex: + + diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 224ec93d28..397e49ef20 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; - -static constexpr int kROISize = 5; +using LoDTensor = framework::LoDTensor; class ROIPoolOp : public framework::OperatorWithKernel { public: @@ -40,11 +39,11 @@ class ROIPoolOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(input_dims.size() == 4, "The format of input tensor is NCHW."); PADDLE_ENFORCE(rois_dims.size() == 2, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); PADDLE_ENFORCE(rois_dims[1] == kROISize, - "ROIs should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]."); + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]."); int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); @@ -109,10 +108,10 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker { "H is the height of the feature, and " "W is the width of the feature."); AddInput("ROIs", - "(Tensor), " + "(LoDTensor), " "ROIs (Regions of Interest) to pool over. " - "should be a 2-D tensor of shape (num_rois, 5)" - "given as [[batch_id, x1, y1, x2, y2], …]. " + "should be a 2-D LoDTensor of shape (num_rois, 4)" + "given as [[x1, y1, x2, y2], …]. " "Where batch_id is the id of the data, " "(x1, y1) is the top left coordinates, and " "(x2, y2) is the bottom right coordinates."); diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index 1931629d13..0bdfee0434 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -19,10 +19,10 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; -static constexpr int kROISize = 5; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, @@ -30,13 +30,11 @@ static inline int NumBlocks(const int N) { } template -__global__ void GPUROIPoolForward(const int nthreads, const T* input_data, - const int64_t* input_rois, - const float spatial_scale, const int channels, - const int height, const int width, - const int pooled_height, - const int pooled_width, T* output_data, - int64_t* argmax_data) { +__global__ void GPUROIPoolForward( + const int nthreads, const T* input_data, const int64_t* input_rois, + const float spatial_scale, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + int* roi_batch_id_data, T* output_data, int64_t* argmax_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { @@ -46,11 +44,11 @@ __global__ void GPUROIPoolForward(const int nthreads, const T* input_data, int n = index / pooled_width / pooled_height / channels; const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; - int roi_start_w = round(offset_input_rois[1] * spatial_scale); - int roi_start_h 
= round(offset_input_rois[2] * spatial_scale); - int roi_end_w = round(offset_input_rois[3] * spatial_scale); - int roi_end_h = round(offset_input_rois[4] * spatial_scale); + int roi_batch_ind = roi_batch_id_data[n]; + int roi_start_w = round(offset_input_rois[0] * spatial_scale); + int roi_start_h = round(offset_input_rois[1] * spatial_scale); + int roi_end_w = round(offset_input_rois[2] * spatial_scale); + int roi_end_h = round(offset_input_rois[3] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); @@ -93,7 +91,8 @@ __global__ void GPUROIPoolBackward( const int nthreads, const int64_t* input_rois, const T* output_grad, const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, T* input_grad) { + const int pooled_height, const int pooled_width, int* roi_batch_id_data, + T* input_grad) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { @@ -102,8 +101,7 @@ __global__ void GPUROIPoolBackward( int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; - const int64_t* offset_input_rois = input_rois + n * kROISize; - int roi_batch_ind = offset_input_rois[0]; + int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; int output_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_output_grad = output_grad + output_offset; @@ -124,7 +122,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -133,23 +131,46 @@ class GPUROIPoolOpKernel : public framework::OpKernel { auto spatial_scale = ctx.Attr("spatial_scale"); auto in_dims = in->dims(); + int batch_size = in_dims[0]; auto in_stride = framework::stride(in_dims); int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + GPUROIPoolForward< T><<>>( output_size, in->data(), rois->data(), spatial_scale, channels, height, width, pooled_height, pooled_width, - out->mutable_data(ctx.GetPlace()), + roi_batch_id_list_gpu.data(), out->mutable_data(ctx.GetPlace()), 
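A standalone sketch of how the one-level LoD carried by the ROIs LoDTensor replaces the old per-row batch_id column: the LoD offsets record which image owns which ROI rows and are expanded into a dense per-ROI batch-id table before the kernel runs (values below are illustrative):

#include <cstdio>
#include <vector>

int main() {
  // Offsets {0, 3, 5}: image 0 owns ROIs [0, 3), image 1 owns ROIs [3, 5).
  std::vector<size_t> rois_lod = {0, 3, 5};
  std::vector<int> roi_batch_id(rois_lod.back());
  for (size_t n = 0; n + 1 < rois_lod.size(); ++n) {
    for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
      roi_batch_id[i] = static_cast<int>(n);
    }
  }
  for (int id : roi_batch_id) std::printf("%d ", id);  // prints: 0 0 0 1 1
  std::printf("\n");
  return 0;
}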
argmax->mutable_data(ctx.GetPlace())); } }; @@ -159,7 +180,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -169,12 +190,27 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); auto spatial_scale = ctx.Attr("spatial_scale"); - size_t rois_num = rois->dims()[0]; + int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (x_grad) { + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(platform::CPUPlace()); + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + framework::Tensor roi_batch_id_list_gpu; + framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), + ctx.device_context(), &roi_batch_id_list_gpu); + x_grad->mutable_data(ctx.GetPlace()); math::SetConstant set_zero; set_zero(ctx.cuda_device_context(), x_grad, static_cast(0)); @@ -189,6 +225,7 @@ class GPUROIPoolGradOpKernel : public framework::OpKernel { output_grad_size, rois->data(), out_grad->data(), argmax->data(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, + roi_batch_id_list_gpu.data(), x_grad->mutable_data(ctx.GetPlace())); } } diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 54e0749031..c4f739b2c6 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -21,12 +21,14 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +static constexpr int kROISize = 4; + template class CPUROIPoolOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* out = ctx.Output("Out"); auto* argmax = ctx.Output("Argmax"); @@ -47,24 +49,36 @@ class CPUROIPoolOpKernel : public framework::OpKernel { auto out_stride = framework::stride(out->dims()); const T* input_data = in->data(); - const int64_t* rois_data = rois->data(); - T* output_data = out->mutable_data(ctx.GetPlace()); - int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); - for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - PADDLE_ENFORCE_GE(roi_batch_id, 0); - PADDLE_ENFORCE_LT(roi_batch_id, batch_size); - rois_data += roi_stride[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + PADDLE_ENFORCE_EQ( + rois_batch_size, batch_size, + "The rois_batch_size and imgs batch_size must be the same."); + int rois_num_with_lod = rois_lod[rois_batch_size]; + PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, + "The rois_num from input and lod must be the same."); + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } } - rois_data = rois->data(); + T* output_data = out->mutable_data(ctx.GetPlace()); + int64_t* argmax_data = argmax->mutable_data(ctx.GetPlace()); + + const int64_t* rois_data = rois->data(); for (int n = 0; n < rois_num; ++n) { - int roi_batch_id = rois_data[0]; - int roi_start_w = round(rois_data[1] * spatial_scale); - int roi_start_h = round(rois_data[2] * spatial_scale); - int roi_end_w = round(rois_data[3] * spatial_scale); - int roi_end_h = round(rois_data[4] * spatial_scale); + int roi_batch_id = roi_batch_id_data[n]; + int roi_start_w = round(rois_data[0] * spatial_scale); + int roi_start_h = round(rois_data[1] * spatial_scale); + int roi_end_w = round(rois_data[2] * spatial_scale); + int roi_end_h = round(rois_data[3] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); @@ -133,7 +147,7 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input("X"); - auto* rois = ctx.Input("ROIs"); + auto* rois = ctx.Input("ROIs"); auto* argmax = ctx.Input("Argmax"); auto* out_grad = ctx.Input(framework::GradVarName("Out")); @@ -143,6 +157,20 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto pooled_width = ctx.Attr("pooled_width"); if (in_grad) { + int rois_num = rois->dims()[0]; + framework::Tensor roi_batch_id_list; + roi_batch_id_list.Resize({rois_num}); + int* roi_batch_id_data = + roi_batch_id_list.mutable_data(ctx.GetPlace()); + + auto rois_lod = rois->lod().back(); + int rois_batch_size = rois_lod.size() - 1; + for (int n = 0; n < rois_batch_size; ++n) { + for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { + roi_batch_id_data[i] = n; + } + } + const int64_t* rois_data = rois->data(); const T* out_grad_data = out_grad->data(); const int64_t* argmax_data = argmax->data(); @@ -156,11 +184,10 @@ class CPUROIPoolGradOpKernel : public framework::OpKernel { auto roi_stride = 
framework::stride(rois->dims()); auto out_stride = framework::stride(out_grad->dims()); - int rois_num = rois->dims()[0]; int channels = in->dims()[1]; for (int n = 0; n < rois_num; ++n) { - int roi_batch_idx = rois_data[0]; + int roi_batch_idx = roi_batch_id_data[n]; T* batch_grad_data = in_grad_data + roi_batch_idx * in_stride[0]; for (int c = 0; c < channels; ++c) { for (int ph = 0; ph < pooled_height; ++ph) { diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 9a0c328033..7f16bf2a0c 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -79,6 +79,7 @@ __all__ = [ 'lrn', 'pad', 'label_smooth', + 'roi_pool', ] @@ -3759,3 +3760,53 @@ def label_smooth(label, outputs={"Out": smooth_label}, attrs={"epsilon": float(epsilon)}) return smooth_label + + +def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): + """ + Region of interest pooling (also known as RoI pooling) is to perform + max pooling on inputs of nonuniform sizes to obtain + fixed-size feature maps (e.g. 7*7). + The operator has three steps: + 1. Dividing each region proposal into equal-sized sections with + the pooled_width and pooled_height + 2. Finding the largest value in each section + 3. Copying these max values to the output buffer + + Args: + input (Variable): The input for ROI pooling. + rois (Variable): ROIs (Regions of Interest) to pool over. It should + be a 2-D one level LoDTensor of shape [num_rois, 4]. + The layout is [x1, y1, x2, y2], where (x1, y1) + is the top left coordinates, and (x2, y2) is the + bottom right coordinates. The num_rois is the + total number of ROIs in this batch data. + pooled_height (integer): The pooled output height. Default: 1 + pooled_width (integer): The pooled output width. Default: 1 + spatial_scale (float): Multiplicative spatial scale factor. To + translate ROI coords from their input scale + to the scale used when pooling. Default: 1.0 + + Returns: + pool_out (Variable): The output is a 4-D tensor of the shape + (num_rois, channels, pooled_h, pooled_w).
+ + Examples: + pool_out = fluid.layers.roi_pool(input=x, rois=rois, 7, 7, 1.0) + """ + helper = LayerHelper('roi_pool', **locals()) + dtype = helper.input_dtype() + pool_out = helper.create_tmp_variable(dtype) + argmaxes = helper.create_tmp_variable(dtype='int32') + helper.append_op( + type="roi_pool", + inputs={"X": input, + "ROIs": rois}, + outputs={"Out": pool_out, + "Argmax": argmaxes}, + attrs={ + "pooled_height": pooled_height, + "pooled_width": pooled_width, + "spatial_scale": spatial_scale + }) + return pool_out diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 17d6afdee1..c5414abf0f 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -359,6 +359,16 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(indices) print(str(program)) + def test_roi_pool(self): + program = Program() + with program_guard(program): + x = layers.data(name="x", shape=[256, 30, 30], dtype="float32") + rois = layers.data( + name="rois", shape=[4], dtype="float32", lod_level=1) + output = layers.roi_pool(x, rois, 7, 7, 0.6) + self.assertIsNotNone(output) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index e556d51b02..3d754aff3a 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -25,7 +25,7 @@ class TestROIPoolOp(OpTest): self.make_rois() self.calc_roi_pool() - self.inputs = {'X': self.x, 'ROIs': self.rois} + self.inputs = {'X': self.x, 'ROIs': (self.rois[:, 1:5], self.rois_lod)} self.attrs = { 'spatial_scale': self.spatial_scale, @@ -36,7 +36,7 @@ class TestROIPoolOp(OpTest): self.outputs = {'Out': self.outs, 'Argmax': self.argmaxes} def init_test_case(self): - self.batch_size = 5 + self.batch_size = 3 self.channels = 3 self.height = 6 self.width = 4 @@ -47,7 +47,6 @@ class TestROIPoolOp(OpTest): self.spatial_scale = 1.0 / 4.0 self.pooled_height = 2 self.pooled_width = 2 - self.rois_num = 2 self.x = np.random.random(self.x_dim).astype('float32') @@ -106,20 +105,24 @@ class TestROIPoolOp(OpTest): def make_rois(self): rois = [] - batch_ids = np.random.randint(0, self.batch_size, size=self.rois_num) - for i in range(self.rois_num): - x1 = np.random.random_integers( - 0, self.width / self.spatial_scale - self.pooled_width) - y1 = np.random.random_integers( - 0, self.height / self.spatial_scale - self.pooled_height) - - x2 = np.random.random_integers(x1 + self.pooled_width, - self.width / self.spatial_scale) - y2 = np.random.random_integers(y1 + self.pooled_height, - self.height / self.spatial_scale) - - roi = [batch_ids[i], x1, y1, x2, y2] - rois.append(roi) + self.rois_lod = [[]] + for bno in range(self.batch_size): + self.rois_lod[0].append(len(rois)) + for i in range(bno + 1): + x1 = np.random.random_integers( + 0, self.width / self.spatial_scale - self.pooled_width) + y1 = np.random.random_integers( + 0, self.height / self.spatial_scale - self.pooled_height) + + x2 = np.random.random_integers(x1 + self.pooled_width, + self.width / self.spatial_scale) + y2 = np.random.random_integers(y1 + self.pooled_height, + self.height / self.spatial_scale) + + roi = [bno, x1, y1, x2, y2] + rois.append(roi) + self.rois_lod[0].append(len(rois)) + self.rois_num = len(rois) self.rois = np.array(rois).astype("int64") def setUp(self): From 
330fa95cbd5a7d5c175b7d947da86618a2713cb7 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 27 Apr 2018 11:07:26 +0800 Subject: [PATCH 61/72] Follow comments --- .../details/broadcast_op_handle_test.cc | 4 +- .../framework/details/fetch_op_handle.cc | 3 +- .../details/reduce_op_handle_test.cc | 6 +- paddle/fluid/framework/tensor_util.cc | 49 ++++++++++++--- paddle/fluid/framework/tensor_util.h | 5 +- paddle/fluid/operators/fetch_op.cc | 4 +- paddle/fluid/operators/math/im2col_test.cc | 16 +++-- .../operators/math/math_function_test.cu | 62 +++++++++---------- paddle/fluid/operators/math/vol2col_test.cc | 9 ++- .../reader/create_double_buffer_reader_op.cc | 3 +- paddle/fluid/operators/reshape_op.h | 8 +-- 11 files changed, 93 insertions(+), 76 deletions(-) diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index dec761399f..8f1b6d1615 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -139,7 +139,7 @@ struct TestBroadcastOpHandle { PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal."); f::Tensor result_tensor; - f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor, true); + f::TensorCopySync(out_tensor, cpu_place, &result_tensor); float* ct = result_tensor.mutable_data(cpu_place); for (int64_t i = 0; i < f::product(kDims); ++i) { @@ -185,7 +185,7 @@ struct TestBroadcastOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor, true); + f::TensorCopySync(rt, cpu_place, &result_tensor); float* ct = result_tensor.data(); for (int64_t i = 0; i < f::product(kDims); ++i) { diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 423449abff..1e8ca20b51 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -66,8 +66,7 @@ void FetchOpHandle::RunImpl() { auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA - TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i], true); - dev_ctxes_.at(t.place())->Wait(); + TensorCopySync(t, cpu, &tensors_[i]); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index c557875013..ffdd7c14eb 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -194,8 +194,7 @@ struct TestReduceOpHandle { } f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor, - true); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { @@ -240,8 +239,7 @@ struct TestReduceOpHandle { auto &rt = out_var->Get(); f::Tensor result_tensor; - f::TensorCopy(rt, cpu_place, *(ctxs_[output_scope_idx]), &result_tensor, - true); + f::TensorCopySync(rt, cpu_place, &result_tensor); float *ct = result_tensor.data(); for (int64_t j = 0; j < f::product(result_tensor.dims()); ++j) { diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index d2e60ab1dd..e5bc74755f 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -20,7 +20,7 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, 
const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, bool sync) { + const platform::DeviceContext& ctx, Tensor* dst) { VLOG(3) << "TensorCopy " << src.dims() << " from " << src.place() << " to " << dst_place; src.check_memory_size(); @@ -48,9 +48,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } else if (platform::is_cpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -61,9 +59,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_gpu_place = boost::get(ctx_place); PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, stream); } else if (platform::is_gpu_place(src_place) && platform::is_gpu_place(dst_place)) { @@ -72,9 +68,7 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, auto ctx_place = ctx.GetPlace(); PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); auto stream = - sync ? nullptr - : reinterpret_cast(ctx) - .stream(); + reinterpret_cast(ctx).stream(); memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); } #endif @@ -92,6 +86,41 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, TensorCopy(src, dst_place, *dev_ctx, dst); } +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst) { + VLOG(3) << "TensorCopySync " << src.dims() << " from " << src.place() + << " to " << dst_place; + src.check_memory_size(); + dst->Resize(src.dims()); + dst->set_layout(src.layout()); + auto src_place = src.place(); + auto src_ptr = src.data(); + auto dst_ptr = dst->mutable_data(dst_place, src.type()); + auto size = src.numel() * SizeOfType(src.type()); + if (platform::is_cpu_place(src_place) && platform::is_cpu_place(dst_place)) { + memory::Copy(boost::get(dst_place), dst_ptr, + boost::get(src_place), src_ptr, size); + } +#ifdef PADDLE_WITH_CUDA + else if (platform::is_gpu_place(src_place) && // NOLINT + platform::is_cpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_cpu_place = boost::get(dst_place); + memory::Copy(dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } else if (platform::is_cpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_cpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size, nullptr); + } else if (platform::is_gpu_place(src_place) && + platform::is_gpu_place(dst_place)) { + auto src_gpu_place = boost::get(src_place); + auto dst_gpu_place = boost::get(dst_place); + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, nullptr); + } +#endif +} + template struct AnyDTypeVisitor { Predicate predicate_; diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index 3af68402dc..dca279b693 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -24,10 +24,11 @@ namespace paddle { namespace framework { void TensorCopy(const Tensor& src, const platform::Place& dst_place, - const platform::DeviceContext& ctx, Tensor* dst, - bool 
sync = false); + const platform::DeviceContext& ctx, Tensor* dst); void TensorCopy(const Tensor& src, const platform::Place& dst_place, Tensor* dst); +void TensorCopySync(const Tensor& src, const platform::Place& dst_place, + Tensor* dst); template void TensorFromVector(const std::vector& src, diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 462d518120..18deec5813 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -57,9 +57,7 @@ class FetchOp : public framework::OperatorBase { // FIXME(yuyang18): Should we assume the fetch operator always generate // CPU outputs? - auto &dev_ctx = *pool.Get(src_item.place()); - - TensorCopy(src_item, platform::CPUPlace(), dev_ctx, &dst_item, true); + TensorCopySync(src_item, platform::CPUPlace(), &dst_item); dst_item.set_lod(src_item.lod()); VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index b09f95e0b2..887c64b32a 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -62,7 +62,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input, true); + TensorCopySync(input_tmp, *place, &input); } output_cfo.mutable_data( {1, filter_size, filter_size, output_height, output_width}, *place); @@ -87,8 +87,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output_cfo.data(); } else { - TensorCopy(output_cfo, paddle::platform::CPUPlace(), *context, &output_tmp, - true); + TensorCopySync(output_cfo, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -99,8 +98,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { out_ocf_ptr = output_ocf.data(); } else { - TensorCopy(output_ocf, paddle::platform::CPUPlace(), *context, &output_tmp, - true); + TensorCopySync(output_ocf, paddle::platform::CPUPlace(), &output_tmp); out_ocf_ptr = output_tmp.data(); } @@ -121,7 +119,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input, true); + TensorCopySync(input_tmp, *place, &input); } col2im(*context, output_cfo, dilation, stride, padding, &input); @@ -130,7 +128,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { @@ -142,7 +140,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input, true); + TensorCopySync(input_tmp, *place, &input); } col2im_ocf(*context, output_ocf, dilation, stride, padding, &input); @@ -150,7 +148,7 @@ void testIm2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } for (int i = 0; i < 6; ++i) { diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index ccfe0c6c07..7986326e96 100644 --- 
a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -40,15 +40,15 @@ TEST(math_function, notrans_mul_trans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input1, gpu_place, context, &input2_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({2, 2}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out, true); + TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -80,8 +80,8 @@ TEST(math_function, notrans_mul_trans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input1, gpu_place, context, &input2_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({2, 2}, gpu_place); @@ -89,7 +89,7 @@ TEST(math_function, notrans_mul_trans_fp16) { context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out, true); + TensorCopySync(out_gpu, cpu_place, &out); float16* out_ptr = out.data(); context.Wait(); @@ -117,15 +117,15 @@ TEST(math_function, trans_mul_notrans_fp32) { float arr[6] = {0, 1, 2, 3, 4, 5}; memcpy(input1_ptr, arr, 6 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input1, gpu_place, context, &input2_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({3, 3}, gpu_place); paddle::operators::math::matmul( context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0); - TensorCopy(out_gpu, cpu_place, context, &out, true); + TensorCopySync(out_gpu, cpu_place, &out); float* out_ptr = out.data(); context.Wait(); @@ -162,8 +162,8 @@ TEST(math_function, trans_mul_notrans_fp16) { float16* input1_ptr = input1.mutable_data({2, 3}, cpu_place); fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5}); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input1, gpu_place, context, &input2_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input1, gpu_place, &input2_gpu); out_gpu.mutable_data({3, 3}, gpu_place); @@ -171,7 +171,7 @@ TEST(math_function, trans_mul_notrans_fp16) { context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu, float16(0)); - TensorCopy(out_gpu, cpu_place, context, &out, true); + TensorCopySync(out_gpu, cpu_place, &out); float16* out_ptr = out.data(); context.Wait(); @@ -214,9 +214,9 @@ TEST(math_function, gemm_notrans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input2, gpu_place, context, &input2_gpu, true); - TensorCopy(input3, gpu_place, context, &input3_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -224,7 +224,7 @@ TEST(math_function, 
gemm_notrans_cublas_fp32) { paddle::operators::math::gemm( context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3, true); + TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -274,9 +274,9 @@ TEST(math_function, gemm_notrans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input2, gpu_place, context, &input2_gpu, true); - TensorCopy(input3, gpu_place, context, &input3_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -285,7 +285,7 @@ TEST(math_function, gemm_notrans_cublas_fp16) { context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3, true); + TensorCopySync(input3_gpu, cpu_place, &input3); // numpy code: // a = np.arange(6).reshape(2, 3) @@ -332,9 +332,9 @@ TEST(math_function, gemm_trans_cublas_fp32) { float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7}; memcpy(input3_ptr, arr3, 8 * sizeof(float)); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input2, gpu_place, context, &input2_gpu, true); - TensorCopy(input3, gpu_place, context, &input3_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float* a = input1_gpu.data(); float* b = input2_gpu.data(); float* c = input3_gpu.mutable_data(gpu_place); @@ -342,7 +342,7 @@ TEST(math_function, gemm_trans_cublas_fp32) { paddle::operators::math::gemm( context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3, true); + TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(input3_ptr[0], 0); @@ -386,9 +386,9 @@ TEST(math_function, gemm_trans_cublas_fp16) { float16* input3_ptr = input3.mutable_data({2, 4}, cpu_place); fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7}); - TensorCopy(input1, gpu_place, context, &input1_gpu, true); - TensorCopy(input2, gpu_place, context, &input2_gpu, true); - TensorCopy(input3, gpu_place, context, &input3_gpu, true); + TensorCopySync(input1, gpu_place, &input1_gpu); + TensorCopySync(input2, gpu_place, &input2_gpu); + TensorCopySync(input3, gpu_place, &input3_gpu); float16* a = input1_gpu.data(); float16* b = input2_gpu.data(); float16* c = input3_gpu.mutable_data(gpu_place); @@ -397,7 +397,7 @@ TEST(math_function, gemm_trans_cublas_fp16) { context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1), c + 1, 4); - TensorCopy(input3_gpu, cpu_place, context, &input3, true); + TensorCopySync(input3_gpu, cpu_place, &input3); context.Wait(); EXPECT_EQ(static_cast(input3_ptr[0]), 0); @@ -441,14 +441,14 @@ void GemvTest(int m, int n, bool trans) { data_b[i] = static_cast(i); } - TensorCopy(mat_a, gpu_place, context, &g_mat_a, true); - TensorCopy(vec_b, gpu_place, context, &g_vec_b, true); + TensorCopySync(mat_a, gpu_place, &g_mat_a); + TensorCopySync(vec_b, gpu_place, &g_vec_b); paddle::operators::math::gemv( context, trans, static_cast(m), static_cast(n), 1., g_data_a, g_data_b, 0., g_data_c); - TensorCopy(g_vec_c, 
cpu_place, context, &vec_c, true); + TensorCopySync(g_vec_c, cpu_place, &vec_c); if (!trans) { for (int i = 0; i < m; ++i) { diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index 9de47dcc25..1b9551b6f2 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -71,7 +71,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - paddle::framework::TensorCopy(input_tmp, *place, *context, &input, true); + paddle::framework::TensorCopySync(input_tmp, *place, &input); } output.mutable_data({1, filter_size, filter_size, filter_size, output_depth, output_height, output_width}, @@ -85,8 +85,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { out_cfo_ptr = output.data(); } else { - TensorCopy(output, paddle::platform::CPUPlace(), *context, &output_tmp, - true); + TensorCopySync(output, paddle::platform::CPUPlace(), &output_tmp); out_cfo_ptr = output_tmp.data(); } @@ -100,7 +99,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { input = input_tmp; } else { - TensorCopy(input_tmp, *place, *context, &input, true); + TensorCopySync(input_tmp, *place, &input); } paddle::operators::math::Col2VolFunctor col2vol; @@ -110,7 +109,7 @@ void testVol2col() { if (paddle::platform::is_cpu_place(*place)) { in_ptr = input.data(); } else { - TensorCopy(input, paddle::platform::CPUPlace(), *context, &input_tmp, true); + TensorCopySync(input, paddle::platform::CPUPlace(), &input_tmp); in_ptr = input_tmp.data(); } diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index 4372f23fc1..afbe562345 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -180,8 +180,7 @@ void DoubleBufferReader::PrefetchThreadFunc() { auto* gpu_ctx = ctxs_[cached_tensor_id].get(); gpu_batch.resize(cpu_batch.size()); for (size_t i = 0; i < cpu_batch.size(); ++i) { - framework::TensorCopy(cpu_batch[i], place_, *gpu_ctx, &gpu_batch[i], - true); + framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]); gpu_batch[i].set_lod(cpu_batch[i].lod()); } } diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 8320c257c9..987b43cc92 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -124,10 +124,8 @@ class ReshapeKernel : public framework::OpKernel { auto *shape_data = shape_tensor->data(); framework::Tensor cpu_shape_tensor; if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(), - &cpu_shape_tensor); + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); shape_data = cpu_shape_tensor.data(); - ctx.device_context().Wait(); } auto shape = std::vector(shape_data, shape_data + shape_tensor->numel()); @@ -146,9 +144,7 @@ class ReshapeKernel : public framework::OpKernel { out->Resize(out_dims); if (!inplace) { out->mutable_data(ctx.GetPlace()); - framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out); - ctx.device_context().Wait(); - // TensorCopy will resize to in_dims. 
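A minimal sketch of the call-site difference this patch introduces, assuming the usual fluid headers; the helper function name is illustrative:

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"

void CopyToHostExamples(const paddle::framework::Tensor& gpu_in,
                        const paddle::platform::DeviceContext& dev_ctx) {
  namespace f = paddle::framework;
  f::Tensor async_out;
  // Asynchronous: enqueued on dev_ctx's stream; the caller must Wait() before
  // reading the result on the host.
  f::TensorCopy(gpu_in, paddle::platform::CPUPlace(), dev_ctx, &async_out);
  dev_ctx.Wait();

  f::Tensor sync_out;
  // Blocking: returns only after the copy has finished, so no Wait() is needed.
  f::TensorCopySync(gpu_in, paddle::platform::CPUPlace(), &sync_out);
}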
+ framework::TensorCopySync(*in, ctx.GetPlace(), out); out->Resize(out_dims); } else { out->ShareDataWith(*in); From 30f9dc92e54493621d817b859e513e9e4196b1d7 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 27 Apr 2018 11:49:40 +0800 Subject: [PATCH 62/72] fix errors --- paddle/fluid/operators/math/concat_test.cc | 24 +++++++++---------- .../reader/create_double_buffer_reader_op.cc | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index 854a8ee442..19d056fa54 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -72,8 +72,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a, true); - TensorCopy(input_b_cpu, Place(), *context, &input_b, true); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } std::vector input; @@ -89,7 +89,7 @@ void testConcat() { int* out_ptr; if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu, true); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -144,8 +144,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a, true); - TensorCopy(input_b_cpu, Place(), *context, &input_b, true); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -159,7 +159,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu, true); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -216,8 +216,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a, true); - TensorCopy(input_b_cpu, Place(), *context, &input_b, true); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -231,7 +231,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu, true); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -290,8 +290,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(input_a_cpu, Place(), *context, &input_a, true); - TensorCopy(input_b_cpu, Place(), *context, &input_b, true); + TensorCopySync(input_a_cpu, Place(), &input_a); + TensorCopySync(input_b_cpu, Place(), &input_b); } input.clear(); @@ -305,7 +305,7 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopy(out, CPUPlace(), *context, &out_cpu, true); + TensorCopySync(out, CPUPlace(), &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index afbe562345..d2831d31af 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -177,9 +177,9 @@ void DoubleBufferReader::PrefetchThreadFunc() { } if (platform::is_gpu_place(place_)) { 
auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; - auto* gpu_ctx = ctxs_[cached_tensor_id].get(); gpu_batch.resize(cpu_batch.size()); for (size_t i = 0; i < cpu_batch.size(); ++i) { + // TODO(fengjiayi): Use asynchronous TensorCopy instead framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]); gpu_batch[i].set_lod(cpu_batch[i].lod()); } From 76c4ae856f3acc44ca427aeaaf35453a0f8407ae Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Fri, 27 Apr 2018 15:22:21 +0800 Subject: [PATCH 63/72] Fix reshape op. (#10253) --- paddle/fluid/operators/reshape_op.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index 8320c257c9..44a91ebd7c 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -93,8 +93,14 @@ class ReshapeOp : public framework::OperatorWithKernel { if (unk_dim_idx != -1) { output_shape[unk_dim_idx] = -in_size / capacity; - PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, - "Invalid shape is given."); + // in_size < 0 and is un-determinate in compile time, skip the check, + // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], + // capacity = -24, in_size = -8, output_shape[0] = 0 + // the following check will fail. + if (in_size > 0) { + PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, + "Invalid shape is given."); + } } else { PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); } From c2cbd1d4db4ae7425bf2972f923b477e8b4a14f2 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Fri, 27 Apr 2018 15:37:12 +0800 Subject: [PATCH 64/72] fix nccl2 version in manylinux devel docker image --- tools/manylinux1/build_scripts/install_nccl2.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/manylinux1/build_scripts/install_nccl2.sh b/tools/manylinux1/build_scripts/install_nccl2.sh index 7efc1fe865..109ef32a99 100644 --- a/tools/manylinux1/build_scripts/install_nccl2.sh +++ b/tools/manylinux1/build_scripts/install_nccl2.sh @@ -1,11 +1,18 @@ #!/bin/bash -DEB="nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb" +VERSION=$(nvcc --version | grep release | grep -oEi "release ([0-9]+)\.([0-9])"| sed "s/release //") +if [ "$VERSION" == "9.0" ]; then + DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" + URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" +else + DEB="nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb" + URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb?responseContentDisposition=attachment" +fi + DIR="/nccl2" mkdir -p $DIR # we cached the nccl2 deb package in BOS, so we can download it with wget # install nccl2: http://docs.nvidia.com/deeplearning/sdk/nccl-install-guide/index.html#down -wget -O $DIR/$DEB \ - "http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb?responseContentDisposition=attachment" +wget -O $DIR/$DEB $URL cd $DIR && ar x $DEB && tar xf data.tar.xz DEBS=$(find ./var/ -name "*.deb") From 8806801eb8064a53e6c17685051615f829a1f45b Mon Sep 17 00:00:00 2001 From: robot <2466956298@qq.com> Date: Wed, 25 Apr 2018 16:57:56 +0800 Subject: [PATCH 65/72] [fix assign op] using assign op instead of scale1.0 --- python/paddle/fluid/layers/tensor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index da066c34bd..5b8f55db72 100644 --- 
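A small standalone sketch of the -1 dimension inference that the relaxed reshape check above guards, with illustrative values:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> in_dims = {6, 4};      // in_size = 24 (fully known at compile time)
  std::vector<int> shape = {-1, 8};       // capacity = -8 (product including the -1)
  int in_size = in_dims[0] * in_dims[1];  // 24
  int capacity = shape[0] * shape[1];     // -8
  int inferred = -in_size / capacity;     // -24 / -8 = 3
  // The validation only makes sense when in_size > 0; here 3 * -8 == -24 holds.
  std::printf("inferred dim 0 = %d, check %d == %d\n", inferred,
              inferred * capacity, -in_size);
  return 0;
}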
a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -193,10 +193,9 @@ def assign(input, output): helper = LayerHelper('assign', **locals()) if isinstance(input, Variable): helper.append_op( - type='scale', + type='assign', inputs={'X': [input]}, outputs={'Out': [output]}, - attrs={'scale': 1.0}) elif isinstance(input, numpy.ndarray): dtype = convert_np_dtype_to_dtype_(input.dtype) if dtype == VarDesc.VarType.FP32: From 19a28b1611d72c2501deaee2a66b5e0836d6f1f1 Mon Sep 17 00:00:00 2001 From: robot <2466956298@qq.com> Date: Wed, 25 Apr 2018 17:03:58 +0800 Subject: [PATCH 66/72] [fix assign op]/2 using assign op instead of scale1.0 --- python/paddle/fluid/layers/tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 5b8f55db72..1cbef96a17 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -195,7 +195,7 @@ def assign(input, output): helper.append_op( type='assign', inputs={'X': [input]}, - outputs={'Out': [output]}, + outputs={'Out': [output]}) elif isinstance(input, numpy.ndarray): dtype = convert_np_dtype_to_dtype_(input.dtype) if dtype == VarDesc.VarType.FP32: From ac2911c399a019ab440d5cc73d081897090ad3f7 Mon Sep 17 00:00:00 2001 From: robot <2466956298@qq.com> Date: Wed, 25 Apr 2018 17:28:30 +0800 Subject: [PATCH 67/72] fix code style --- python/paddle/fluid/layers/tensor.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 1cbef96a17..4be0dc6a6b 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -193,9 +193,7 @@ def assign(input, output): helper = LayerHelper('assign', **locals()) if isinstance(input, Variable): helper.append_op( - type='assign', - inputs={'X': [input]}, - outputs={'Out': [output]}) + type='assign', inputs={'X': [input]}, outputs={'Out': [output]}) elif isinstance(input, numpy.ndarray): dtype = convert_np_dtype_to_dtype_(input.dtype) if dtype == VarDesc.VarType.FP32: From 31373370ceb020001b66de5dbee0566bef9a6cd2 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 27 Apr 2018 16:14:10 +0800 Subject: [PATCH 68/72] fix mac compile errors --- paddle/utils/tests/test_CustomStackTrace.cpp | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/utils/tests/test_CustomStackTrace.cpp index c320074fba..4d5540b24c 100644 --- a/paddle/utils/tests/test_CustomStackTrace.cpp +++ b/paddle/utils/tests/test_CustomStackTrace.cpp @@ -12,10 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include - -#include -#include +#include // NOLINT +#include // NOLINT #include "paddle/utils/CustomStackTrace.h" #include "paddle/utils/Locks.h" @@ -39,14 +37,10 @@ void testNormalImpl( threads.reserve(FLAGS_test_thread_num); for (int32_t i = 0; i < FLAGS_test_thread_num; ++i) { - threads.emplace_back(new std::thread([&tracer, - &countDown, - &layerSize, - &startBarrier, - &doneBarrier, - &callback] { - callback(tracer, countDown, layerSize, startBarrier, doneBarrier); - })); + threads.emplace_back( + new std::thread([&tracer, &startBarrier, &doneBarrier, &callback] { + callback(tracer, countDown, layerSize, startBarrier, doneBarrier); + })); } size_t cntDown = countDown; while (cntDown-- > 0) { From d2f54e9f724b268bc50c7e4eacab3ce9e6d412e2 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Fri, 27 Apr 2018 16:32:57 +0800 Subject: [PATCH 69/72] update --- tools/manylinux1/build_scripts/install_nccl2.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/manylinux1/build_scripts/install_nccl2.sh b/tools/manylinux1/build_scripts/install_nccl2.sh index 109ef32a99..282c5c290d 100644 --- a/tools/manylinux1/build_scripts/install_nccl2.sh +++ b/tools/manylinux1/build_scripts/install_nccl2.sh @@ -4,8 +4,8 @@ if [ "$VERSION" == "9.0" ]; then DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda9.0_1-1_amd64.deb" else - DEB="nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb" - URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.4-ga-cuda8.0_1-1_amd64.deb?responseContentDisposition=attachment" + DEB="nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb" + URL="http://nccl2-deb.gz.bcebos.com/nccl-repo-ubuntu1604-2.1.15-ga-cuda8.0_1-1_amd64.deb" fi DIR="/nccl2" From 9612c7e599cef15a9641fa69a1cb78809977b549 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 27 Apr 2018 16:47:27 +0800 Subject: [PATCH 70/72] Add comments and polish code --- .../details/multi_devices_graph_builder.cc | 58 +++++++++---------- .../details/multi_devices_graph_builder.h | 11 +++- paddle/fluid/framework/details/ssa_graph.h | 10 ++++ .../framework/details/ssa_graph_builder.h | 2 + 4 files changed, 51 insertions(+), 30 deletions(-) diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 3413467b14..c2eb1c31b4 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -58,23 +58,20 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, - const platform::Place &p, - const size_t &i) const { + size_t place_id) const { + auto p = places_[place_id]; auto *op_handle = result->ops_.back().get(); op_handle->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); - auto var_names = op.InputArgumentNames(); - - for (auto &each_var_name : var_names) { - VarHandle *var = CreateOrGetLatestVarHandle(result, each_var_name, p, i); + for (auto &each_var_name : op.InputArgumentNames()) { + VarHandle *var = + CreateOrGetLatestVarHandle(result, each_var_name, p, place_id); op_handle->AddInput(var); } - var_names = op.OutputArgumentNames(); - - for (auto &each_var_name : var_names) { - CreateOpOutput(result, op_handle, each_var_name, p, i); + for (auto &each_var_name : op.OutputArgumentNames()) { + CreateOpOutput(result, op_handle, each_var_name, p, place_id); } } @@ -84,17 
+81,18 @@ bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op, return false; } - auto checker = [&](const std::vector opvars, - const std::vector sendvars) -> bool { - bool is_dist_train_op = false; + /** + * Check any of opvars contains `.block` and in sendvars + */ + auto checker = [](const std::vector &opvars, + const std::vector &sendvars) -> bool { for (auto &var : opvars) { if (var.find(".block") != std::string::npos && std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) { - is_dist_train_op = true; - break; + return true; } } - return is_dist_train_op; + return false; }; if (op.Type() == "split") { @@ -117,13 +115,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( places_.size()); // Find "send" op first for split is in front of send. - OpDesc *send_op = nullptr; - for (auto *op : program.Block(0).AllOps()) { - if (op->Type() == "send") { - send_op = op; - break; - } - } + OpDesc *send_op = GetSendOpDesc(program); bool is_forwarding = true; for (auto *op : program.Block(0).AllOps()) { @@ -134,6 +126,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( } else if (IsDistTrainOp(*op, send_op)) { CreateComputationalOps(&result, *op, 1); } else if (IsScaleLossOp(*op)) { + // user can customize loss@grad if skip_scale_loss_ if (!skip_scale_loss_) { CreateScaleLossGradOp(&result); } @@ -142,10 +135,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( CreateComputationalOps(&result, *op, places_.size()); if (!is_forwarding) { // Currently, we assume that once gradient is generated, it can be - // broadcast, and each gradient is only broadcast once. But there are no - // other cases, for example, we need to adjust the gradient according to - // the input when we get the gradient, which is not considered at - // present. + // broadcast, and each gradient is only broadcast once. for (auto &og : op->OutputArgumentNames()) { if (IsParameterGradientOnce(og, &og_has_been_broadcast)) { InsertNCCLAllReduceOp(&result, og); @@ -175,6 +165,16 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( return std::unique_ptr(graph); } +OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc( + const ProgramDesc &program) const { + for (auto *op : program.Block(0).AllOps()) { + if (op->Type() == "send") { + return op; + } + } + return nullptr; +} + void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp( SSAGraph *result, const std::string &og) const { #ifdef PADDLE_WITH_CUDA @@ -243,7 +243,7 @@ void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result, auto p = places_[scope_idx]; auto s = local_scopes_[scope_idx]; result->ops_.emplace_back(new ComputationOpHandle(op, s, p)); - CreateOpHandleIOs(result, op, p, scope_idx); + CreateOpHandleIOs(result, op, scope_idx); } } @@ -255,7 +255,7 @@ void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result, result->ops_.emplace_back(new SendOpHandle(op, s, p)); // Create inputs for output on original place and no ssa output // is created for send op. 
- CreateOpHandleIOs(result, op, p, 0); + CreateOpHandleIOs(result, op, 0); } bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const { diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h index dc3da70eda..fa4d31bdc4 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -48,7 +48,7 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { private: void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, - const platform::Place &p, const size_t &i) const; + size_t place_id) const; private: std::string loss_var_name_; @@ -65,6 +65,9 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { void CreateSendOp(SSAGraph *result, const OpDesc &op) const; + /** + * Is this operator as the end-point operator before/after send operator. + */ bool IsDistTrainOp(const OpDesc &op, OpDesc *send_op) const; void CreateComputationalOps(SSAGraph *result, const OpDesc &op, @@ -77,6 +80,12 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { std::unordered_set *og_has_been_broadcast) const; void InsertNCCLAllReduceOp(SSAGraph *result, const std::string &og) const; + + /** + * Get send op in the global block of program. + * nullptr if not found. + */ + OpDesc *GetSendOpDesc(const ProgramDesc &program) const; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/ssa_graph.h b/paddle/fluid/framework/details/ssa_graph.h index 72684e7f97..e996a00c16 100644 --- a/paddle/fluid/framework/details/ssa_graph.h +++ b/paddle/fluid/framework/details/ssa_graph.h @@ -25,12 +25,22 @@ namespace paddle { namespace framework { namespace details { +// A SSA graph used by parallel executor. struct SSAGraph { + // all variable in each devices. + // The outside vector is the device vector. Each element of this vector is a + // map from variable name to variables. The variables, who have the same name, + // will have a different version. The offset in the + // `std::vector>` is the version of varaibles. std::vector< std::unordered_map>>> vars_; + // aux variables to represent dependency. Useful to resolve data hazard. std::unordered_set> dep_vars_; + + // all operators. NOTE that even we use a vector here, the operators is + // unordered. 
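A simplified sketch of how the versioned vars_ container described above is read; the types are reduced to standard containers and the function is illustrative, while the real graph stores VarHandle objects:

#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

// vars[device][name] is a list ordered by SSA version: element k is the k-th
// definition of that variable on that device, so the latest one is .back().
using VersionList = std::vector<std::string>;  // stand-in for VarHandle pointers
using DeviceVars = std::unordered_map<std::string, VersionList>;

const std::string* LatestVersion(const std::vector<DeviceVars>& vars,
                                 std::size_t device, const std::string& name) {
  auto it = vars[device].find(name);
  if (it == vars[device].end() || it->second.empty()) return nullptr;
  return &it->second.back();
}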
std::vector> ops_; }; diff --git a/paddle/fluid/framework/details/ssa_graph_builder.h b/paddle/fluid/framework/details/ssa_graph_builder.h index be1f0460e4..64e5d93081 100644 --- a/paddle/fluid/framework/details/ssa_graph_builder.h +++ b/paddle/fluid/framework/details/ssa_graph_builder.h @@ -48,6 +48,8 @@ class SSAGraphBuilder { const platform::Place &place, size_t place_offset); + // Add an output variable (each_var_name, place, place_offset) to op_handle, + // which belongs to graph static void CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, const std::string &each_var_name, const platform::Place &place, size_t place_offset); From e7353596311d65665c1e90948d17f3f871e7a0fa Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Fri, 27 Apr 2018 15:44:38 -0700 Subject: [PATCH 71/72] Fix more CPPlint issues in fluid/operators/math (#10249) * Fix CPPLint errors * Fix CPPLint errors in sequence2batch * Fix compilation * Fix LSTM op and GRU op * Fix LSTMP op * Fix more cpplint errors in operators/math * Address Code review feedback --- paddle/fluid/operators/gru_op.h | 10 +-- paddle/fluid/operators/lstm_op.h | 12 +-- paddle/fluid/operators/lstmp_op.h | 12 +-- paddle/fluid/operators/math/concat_test.cc | 75 ++++++++++--------- paddle/fluid/operators/math/cross_entropy.cu | 4 +- .../operators/math/detail/lstm_gpu_kernel.h | 4 +- paddle/fluid/operators/math/sampler.h | 28 +++---- .../operators/math/selected_rows_functor.cc | 1 + .../operators/math/selected_rows_functor.cu | 1 + paddle/fluid/operators/math/sequence2batch.cc | 6 +- paddle/fluid/operators/math/sequence2batch.cu | 6 +- paddle/fluid/operators/math/sequence2batch.h | 14 ++-- paddle/fluid/operators/math/sequence_scale.cc | 10 +-- paddle/fluid/operators/math/sequence_scale.cu | 10 +-- paddle/fluid/operators/math/sequence_scale.h | 4 +- paddle/fluid/operators/nccl/nccl_gpu_common.h | 4 +- paddle/fluid/operators/warpctc_op.h | 4 +- 17 files changed, 106 insertions(+), 99 deletions(-) diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index 53f844a660..3b0d93e54b 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -34,7 +34,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, dst, indexed_src); } template @@ -61,7 +61,7 @@ class GRUKernel : public framework::OpKernel { bool is_reverse = context.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& dev_ctx = context.template device_context(); - to_batch(dev_ctx, *input, *batch_gate, true, is_reverse); + to_batch(dev_ctx, *input, batch_gate, true, is_reverse); if (bias) { math::RowwiseAdd add_bias; @@ -113,7 +113,7 @@ class GRUKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_hidden->set_lod(batch_gate->lod()); - to_seq(dev_ctx, *batch_hidden, *hidden); + to_seq(dev_ctx, *batch_hidden, hidden); } void Compute(const framework::ExecutionContext& context) const override { @@ -174,7 +174,7 @@ class GRUGradKernel : public framework::OpKernel { bool is_reverse = context.Attr("is_reverse"); batch_hidden_grad.set_lod(batch_hidden->lod()); - to_batch(dev_ctx, *hidden_grad, batch_hidden_grad, false, is_reverse); + to_batch(dev_ctx, *hidden_grad, &batch_hidden_grad, false, is_reverse); math::GRUMetaValue gru_value; gru_value.gate_weight = const_cast(weight_data); @@ -236,7 
+236,7 @@ class GRUGradKernel : public framework::OpKernel { input_grad->mutable_data(context.GetPlace()); math::Batch2LoDTensorFunctor to_seq; batch_gate_grad.set_lod(batch_gate->lod()); - to_seq(dev_ctx, batch_gate_grad, *input_grad); + to_seq(dev_ctx, batch_gate_grad, input_grad); } if (bias_grad) { bias_grad->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index a1ef0eb278..0707aded8c 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -33,7 +33,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index_lod, *dst, indexed_src); + row_shuffle(ctx, src, index_lod, dst, indexed_src); } template @@ -57,7 +57,7 @@ class LSTMKernel : public framework::OpKernel { bool is_reverse = ctx.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& device_ctx = ctx.template device_context(); - to_batch(device_ctx, *input, *batch_gate, true, is_reverse); + to_batch(device_ctx, *input, batch_gate, true, is_reverse); auto in_dims = input->dims(); int frame_size = static_cast(in_dims[1] / 4); @@ -161,11 +161,11 @@ class LSTMKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_hidden.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden - to_seq(device_ctx, batch_hidden, *hidden_out); + to_seq(device_ctx, batch_hidden, hidden_out); batch_cell.set_lod(batch_gate->lod()); // restore the output cell state in LoDTensor from the batch cell - to_seq(device_ctx, batch_cell, *cell_out); + to_seq(device_ctx, batch_cell, cell_out); } }; @@ -257,7 +257,7 @@ class LSTMGradKernel : public framework::OpKernel { const framework::DDim& dims, framework::LoDTensor& dst) { dst.mutable_data(dims, ctx.GetPlace()); dst.set_lod(batch_gate->lod()); - to_batch(ctx, src, dst, false); + to_batch(ctx, src, &dst, false); }; LoDTensor batch_hidden, batch_hidden_g, batch_cell; @@ -351,7 +351,7 @@ class LSTMGradKernel : public framework::OpKernel { if (in_g) { /* backward data */ in_g->mutable_data(ctx.GetPlace()); - to_seq(device_ctx, batch_gate_g, *in_g); + to_seq(device_ctx, batch_gate_g, in_g); } if (bias && bias_g) { /* backward bias */ diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 172db54896..628936a310 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -40,7 +40,7 @@ inline void ReorderInitState(const DeviceContext& ctx, framework::Tensor* dst, bool indexed_src) { math::CopyMatrixRowsFunctor row_shuffle; dst->mutable_data(src.dims(), ctx.GetPlace()); - row_shuffle(ctx, src, index, *dst, indexed_src); + row_shuffle(ctx, src, index, dst, indexed_src); } template @@ -81,7 +81,7 @@ class LSTMPKernel : public framework::OpKernel { bool is_reverse = ctx.Attr("is_reverse"); math::LoDTensor2BatchFunctor to_batch; auto& device_ctx = ctx.template device_context(); - to_batch(device_ctx, *input, *batch_gate, true, is_reverse); + to_batch(device_ctx, *input, batch_gate, true, is_reverse); auto in_dims = input->dims(); int frame_size = static_cast(in_dims[1] / 4); @@ -208,11 +208,11 @@ class LSTMPKernel : public framework::OpKernel { math::Batch2LoDTensorFunctor to_seq; batch_proj.set_lod(batch_gate->lod()); // restore the output hidden in LoDTensor from the batch hidden - to_seq(device_ctx, batch_proj, *proj_out); + 
to_seq(device_ctx, batch_proj, proj_out); batch_cell.set_lod(batch_gate->lod()); // restore the output cell state in LoDTensor from the batch cell - to_seq(device_ctx, batch_cell, *cell_out); + to_seq(device_ctx, batch_cell, cell_out); } }; @@ -332,7 +332,7 @@ class LSTMPGradKernel : public framework::OpKernel { const framework::DDim& dims, framework::LoDTensor& dst) { dst.mutable_data(dims, ctx.GetPlace()); dst.set_lod(batch_gate->lod()); - to_batch(ctx, src, dst, false); + to_batch(ctx, src, &dst, false); }; LoDTensor batch_hidden_g, batch_proj, batch_proj_g, batch_cell; @@ -471,7 +471,7 @@ class LSTMPGradKernel : public framework::OpKernel { if (in_g) { /* backward data */ in_g->mutable_data(ctx.GetPlace()); - to_seq(device_ctx, batch_gate_g, *in_g); + to_seq(device_ctx, batch_gate_g, in_g); } if (bias && bias_g) { /* backward bias */ diff --git a/paddle/fluid/operators/math/concat_test.cc b/paddle/fluid/operators/math/concat_test.cc index 19d056fa54..f0847aafae 100644 --- a/paddle/fluid/operators/math/concat_test.cc +++ b/paddle/fluid/operators/math/concat_test.cc @@ -17,17 +17,14 @@ limitations under the License. */ #include #include "paddle/fluid/framework/tensor_util.h" -using namespace paddle::framework; -using namespace paddle::platform; - template void testConcat() { - Tensor input_a_cpu; - Tensor input_b_cpu; - Tensor out_cpu; - Tensor input_a; - Tensor input_b; - Tensor out; + paddle::framework::Tensor input_a_cpu; + paddle::framework::Tensor input_b_cpu; + paddle::framework::Tensor out_cpu; + paddle::framework::Tensor input_a; + paddle::framework::Tensor input_b; + paddle::framework::Tensor out; DeviceContext* context = new DeviceContext(Place()); // DeviceContext context(Place()); @@ -40,18 +37,18 @@ void testConcat() { * output: * out.shape: [5, 3, 4] */ - auto dim_a = make_ddim({2, 3, 4}); - auto dim_b = make_ddim({3, 3, 4}); - auto dim_out = make_ddim({5, 3, 4}); + auto dim_a = paddle::framework::make_ddim({2, 3, 4}); + auto dim_b = paddle::framework::make_ddim({3, 3, 4}); + auto dim_out = paddle::framework::make_ddim({5, 3, 4}); input_a.mutable_data(dim_a, Place()); input_b.mutable_data(dim_b, Place()); out.mutable_data(dim_out, Place()); if (paddle::platform::is_gpu_place(Place())) { - input_a_cpu.mutable_data(dim_a, CPUPlace()); - input_b_cpu.mutable_data(dim_b, CPUPlace()); - out_cpu.mutable_data(dim_out, CPUPlace()); + input_a_cpu.mutable_data(dim_a, paddle::platform::CPUPlace()); + input_b_cpu.mutable_data(dim_b, paddle::platform::CPUPlace()); + out_cpu.mutable_data(dim_out, paddle::platform::CPUPlace()); } int* a_ptr; @@ -72,11 +69,11 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(input_a_cpu, Place(), &input_a); - TensorCopySync(input_b_cpu, Place(), &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } - std::vector input; + std::vector input; input.push_back(input_a); input.push_back(input_b); @@ -89,7 +86,8 @@ void testConcat() { int* out_ptr; if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(out, CPUPlace(), &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -115,9 +113,9 @@ void testConcat() { * output: * out.shape: [2, 7, 4] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 4, 4}); - dim_out = make_ddim({2, 7, 4}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = 
paddle::framework::make_ddim({2, 4, 4}); + dim_out = paddle::framework::make_ddim({2, 7, 4}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -144,8 +142,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(input_a_cpu, Place(), &input_a); - TensorCopySync(input_b_cpu, Place(), &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -159,7 +157,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(out, CPUPlace(), &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -187,9 +186,9 @@ void testConcat() { * output: * out.shape: [2, 3, 9] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 3, 5}); - dim_out = make_ddim({2, 3, 9}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = paddle::framework::make_ddim({2, 3, 5}); + dim_out = paddle::framework::make_ddim({2, 3, 9}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -216,8 +215,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(input_a_cpu, Place(), &input_a); - TensorCopySync(input_b_cpu, Place(), &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -231,7 +230,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(out, CPUPlace(), &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); @@ -261,9 +261,9 @@ void testConcat() { * output: * out.shape: [2, 6, 4] */ - dim_a = make_ddim({2, 3, 4}); - dim_b = make_ddim({2, 3, 4}); - dim_out = make_ddim({2, 6, 4}); + dim_a = paddle::framework::make_ddim({2, 3, 4}); + dim_b = paddle::framework::make_ddim({2, 3, 4}); + dim_out = paddle::framework::make_ddim({2, 6, 4}); input_a.Resize(dim_a); input_b.Resize(dim_b); @@ -290,8 +290,8 @@ void testConcat() { } if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(input_a_cpu, Place(), &input_a); - TensorCopySync(input_b_cpu, Place(), &input_b); + paddle::framework::TensorCopy(input_a_cpu, Place(), *context, &input_a); + paddle::framework::TensorCopy(input_b_cpu, Place(), *context, &input_b); } input.clear(); @@ -305,7 +305,8 @@ void testConcat() { PADDLE_ENFORCE_EQ(input_b.dims(), dim_b); if (paddle::platform::is_gpu_place(Place())) { - TensorCopySync(out, CPUPlace(), &out_cpu); + paddle::framework::TensorCopy(out, paddle::platform::CPUPlace(), *context, + &out_cpu); out_ptr = out_cpu.data(); } else { out_ptr = out.data(); diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index f4935c2813..da73f575f3 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -108,7 +108,9 @@ class CrossEntropyFunctor { if (softLabel) { const T* label_data = labels->data(); - int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num))); + int block = class_num > 512 + ? 
512 + : pow(2, static_cast(std::log2(class_num))); SoftCrossEntropyKernel<<< batch_size, block, block * sizeof(T), diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h index ee7b16da41..0b1034a080 100644 --- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h @@ -13,13 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include + #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" #include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/device_context.h" -#include - namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h index 9d6a6c28c4..b82691f269 100644 --- a/paddle/fluid/operators/math/sampler.h +++ b/paddle/fluid/operators/math/sampler.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include -typedef long int64; namespace paddle { namespace operators { namespace math { @@ -27,25 +27,25 @@ namespace math { */ class Sampler { public: - explicit Sampler(int64 range) : range_(range) { + explicit Sampler(int64_t range) : range_(range) { PADDLE_ENFORCE_GT(range, 0); std::random_device r; seed_ = r(); } - explicit Sampler(int64 range, unsigned int seed) + explicit Sampler(int64_t range, unsigned int seed) : range_(range), seed_(seed) { PADDLE_ENFORCE_GT(range, 0); } virtual ~Sampler(); // Sample a single value - virtual int64 Sample() const = 0; + virtual int64_t Sample() const = 0; // The probability that a single call to Sample() returns the given value. 
- virtual float Probability(int64 value) const = 0; + virtual float Probability(int64_t value) const = 0; - int64 range() { return range_; }; + int64 range() { return range_; } protected: - const int64 range_; + const int64_t range_; unsigned int seed_; }; @@ -56,15 +56,15 @@ class Sampler { */ class UniformSampler : public Sampler { public: - explicit UniformSampler(int64 range); + explicit UniformSampler(int64_t range); - explicit UniformSampler(int64 range, unsigned int seed); + explicit UniformSampler(int64_t range, unsigned int seed); ~UniformSampler() override {} int64 Sample() const override; - float Probability(int64 value) const override; + float Probability(int64_t value) const override; private: const float inv_range_; @@ -79,15 +79,15 @@ class UniformSampler : public Sampler { */ class LogUniformSampler : public Sampler { public: - explicit LogUniformSampler(int64 range); + explicit LogUniformSampler(int64_t range); - explicit LogUniformSampler(int64 range, unsigned int seed); + explicit LogUniformSampler(int64_t range, unsigned int seed); ~LogUniformSampler() override {} int64 Sample() const override; - float Probability(int64 value) const override; + float Probability(int64_t value) const override; private: const float log_range_; @@ -95,6 +95,6 @@ class LogUniformSampler : public Sampler { std::shared_ptr> dist_; }; -} // math +} // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc index 5da3d15277..a830dc5250 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cc +++ b/paddle/fluid/operators/math/selected_rows_functor.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index 5d78fd9d21..7b31ee8e38 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include +#include #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" diff --git a/paddle/fluid/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc index 8899abff36..b546b87282 100644 --- a/paddle/fluid/operators/math/sequence2batch.cc +++ b/paddle/fluid/operators/math/sequence2batch.cc @@ -23,11 +23,11 @@ class CopyMatrixRowsFunctor { public: void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index) { size_t* index = index_lod.data(); auto src_dims = src.dims(); - auto dst_dims = dst.dims(); + auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2UL, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL, @@ -37,7 +37,7 @@ class CopyMatrixRowsFunctor { auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); - auto* dst_data = dst.data(); + auto* dst_data = dst->data(); for (int i = 0; i < height; ++i) { if (is_src_index) { memcpy(dst_data + i * width, src_data + index[i] * width, diff --git a/paddle/fluid/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu index 3185f10d41..be73adfc0c 100644 --- a/paddle/fluid/operators/math/sequence2batch.cu +++ b/paddle/fluid/operators/math/sequence2batch.cu @@ -43,10 +43,10 @@ class CopyMatrixRowsFunctor { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index) { auto src_dims = src.dims(); - auto dst_dims = dst.dims(); + auto dst_dims = dst->dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, "The src must be matrix with rank 2."); PADDLE_ENFORCE_EQ(dst_dims.size(), 2, @@ -56,7 +56,7 @@ class CopyMatrixRowsFunctor { auto height = dst_dims[0]; auto width = dst_dims[1]; auto* src_data = src.data(); - auto* dst_data = dst.data(); + auto* dst_data = dst->data(); dim3 threads(128, 8); dim3 grid(8, 1); diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h index e78aafd37d..0abda999a5 100644 --- a/paddle/fluid/operators/math/sequence2batch.h +++ b/paddle/fluid/operators/math/sequence2batch.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor.h" @@ -35,7 +37,7 @@ class CopyMatrixRowsFunctor { // copy the input src to the indexed rows of output dst. // The indexed rows are based on the input index. 
void operator()(const DeviceContext& context, const framework::Tensor& src, - framework::Vector index_lod, framework::Tensor& dst, + framework::Vector index_lod, framework::Tensor* dst, bool is_src_index); }; @@ -58,10 +60,10 @@ class LoDTensor2BatchFunctor { public: void operator()(const DeviceContext& context, const framework::LoDTensor& lod_tensor, - framework::LoDTensor& batch, bool is_cal_batch_lod, + framework::LoDTensor* batch, bool is_cal_batch_lod, bool is_reverse = false) const { if (!is_cal_batch_lod) { - auto lods = batch.lod(); + auto lods = batch->lod(); PADDLE_ENFORCE_GT(lods.size(), 2UL); PADDLE_ENFORCE_EQ(lods[1].size(), static_cast(lod_tensor.dims()[0])); @@ -141,7 +143,7 @@ class LoDTensor2BatchFunctor { for (size_t i = 0; i < seq_info.size(); ++i) { seq_order[i] = seq_info[i].seq_idx; } - batch.set_lod(batch_lods); + batch->set_lod(batch_lods); CopyMatrixRowsFunctor to_batch; to_batch(context, lod_tensor, batch_lods[1], batch, true); @@ -153,11 +155,11 @@ class Batch2LoDTensorFunctor { public: void operator()(const DeviceContext& context, const framework::LoDTensor& batch, - framework::LoDTensor& lod_tensor) const { + framework::LoDTensor* lod_tensor) const { auto in_lod = batch.lod(); PADDLE_ENFORCE_GT(in_lod.size(), 2UL); PADDLE_ENFORCE_EQ(in_lod[1].size(), - static_cast(lod_tensor.dims()[0])); + static_cast(lod_tensor->dims()[0])); CopyMatrixRowsFunctor to_seq; to_seq(context, batch, in_lod[1], lod_tensor, false); } diff --git a/paddle/fluid/operators/math/sequence_scale.cc b/paddle/fluid/operators/math/sequence_scale.cc index 2c46d4183b..ee5b22ca85 100644 --- a/paddle/fluid/operators/math/sequence_scale.cc +++ b/paddle/fluid/operators/math/sequence_scale.cc @@ -21,15 +21,15 @@ namespace math { template class ScaleLoDTensorFunctor { public: - void operator()(const platform::CPUDeviceContext& context, - framework::LoDTensor& seq, const T* scales) { + void operator()(const platform::CPUDeviceContext& context, const T* scales, + framework::LoDTensor* seq) { const size_t level = 0; - auto lod = seq.lod(); + auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; - size_t seq_width = seq.dims()[1]; + size_t seq_width = seq->dims()[1]; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - T* seq_data = seq.mutable_data(context.GetPlace()); + T* seq_data = seq->mutable_data(context.GetPlace()); for (size_t i = 0; i < num_seq; ++i) { for (size_t j = lod[level][i] * seq_width; j < lod[level][i + 1] * seq_width; ++j) { diff --git a/paddle/fluid/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu index 74085153c6..430bf13c3f 100644 --- a/paddle/fluid/operators/math/sequence_scale.cu +++ b/paddle/fluid/operators/math/sequence_scale.cu @@ -35,14 +35,14 @@ __global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales, template class ScaleLoDTensorFunctor { public: - void operator()(const platform::CUDADeviceContext& context, - framework::LoDTensor& seq, const T* scales) { + void operator()(const platform::CUDADeviceContext& context, const T* scales, + framework::LoDTensor* seq) { const size_t level = 0; - auto lod = seq.lod(); + auto lod = seq->lod(); const size_t num_seq = lod[level].size() - 1; - const size_t seq_width = seq.numel() / seq.dims()[0]; + const size_t seq_width = seq->numel() / seq->dims()[0]; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); - T* seq_data = seq.mutable_data(context.GetPlace()); + T* seq_data = seq->mutable_data(context.GetPlace()); SequenceScaleKernel<<< num_seq, 
PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( diff --git a/paddle/fluid/operators/math/sequence_scale.h b/paddle/fluid/operators/math/sequence_scale.h index 6cdcbe21cb..202243985c 100644 --- a/paddle/fluid/operators/math/sequence_scale.h +++ b/paddle/fluid/operators/math/sequence_scale.h @@ -46,8 +46,8 @@ namespace math { template class ScaleLoDTensorFunctor { public: - void operator()(const DeviceContext& context, framework::LoDTensor& seq, - const T* scales); + void operator()(const DeviceContext& context, const T* scales, + framework::LoDTensor* seq); }; } // namespace math diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.h b/paddle/fluid/operators/nccl/nccl_gpu_common.h index 113f93e346..558ff4cc09 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.h +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.h @@ -15,9 +15,9 @@ limitations under the License. */ #pragma once #include -#include +#include // NOLINT #include -#include +#include // NOLINT #include #include #include diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index afbfe69973..85131d0025 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -222,8 +222,8 @@ class WarpCTCGradKernel : public framework::OpKernel { const T* loss_grad_data = loss_grad->data(); math::ScaleLoDTensorFunctor()( - ctx.template device_context(), *logits_grad, - loss_grad_data); + ctx.template device_context(), loss_grad_data, + logits_grad); } }; From 5eb4c3aa6fd503c75e20a74b5b5e35b0f0105714 Mon Sep 17 00:00:00 2001 From: Xi Chen Date: Fri, 27 Apr 2018 16:56:55 -0700 Subject: [PATCH 72/72] force clean up process when no_clean_up is set true --- tools/aws_benchmarking/server/cluster_master.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py index 7952e61159..1333a942bf 100644 --- a/tools/aws_benchmarking/server/cluster_master.py +++ b/tools/aws_benchmarking/server/cluster_master.py @@ -640,6 +640,7 @@ def start_server(args): elif request_path == "/cleanup": self._set_headers() logging.info("Received request to cleanup cluster") + args.no_clean_up = False cleanup(args.task_name) self.wfile.write("cleanup in progress")
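
A minimal sketch, not part of the patch, of what the last hunk above enforces: when the master's HTTP handler receives a /cleanup request, args.no_clean_up is forced to False so that cleanup(args.task_name) always tears the cluster down, even if the run was started with cleanup disabled. The handle_cleanup helper and the cleanup stub below are illustrative stand-ins, not the repository's implementation in cluster_master.py.

import argparse

def cleanup(task_name):
    # Stand-in for the real AWS teardown routine in cluster_master.py.
    print("cleaning up resources for task %s" % task_name)

def handle_cleanup(args):
    # An explicit /cleanup request overrides --no_clean_up: clear the flag
    # so the teardown below always executes.
    args.no_clean_up = False
    cleanup(args.task_name)
    return "cleanup in progress"

if __name__ == "__main__":
    # Simulate a run launched with --no_clean_up followed by a /cleanup request.
    args = argparse.Namespace(task_name="benchmark-1", no_clean_up=True)
    print(handle_cleanup(args))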