From 18bff5298dc3ff90a53378bd1c45740a8ab20d79 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 1 Feb 2019 10:58:47 +0000 Subject: [PATCH 01/33] extract fused_emb_seq_pool forward function test=develop --- .../fused/fused_embedding_seq_pool_op.h | 58 ++++++++++++------- 1 file changed, 37 insertions(+), 21 deletions(-) diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 758432fd9e..744e83541d 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -31,38 +31,54 @@ using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; using DDim = framework::DDim; +template +void emb_seqpool(const framework::ExecutionContext &context, const T *table, + const int64_t *idx, T *out, int64_t table_height, + int64_t table_width, int64_t idx_height, int64_t idx_width, + int64_t out_width) { // pool type == sum + PADDLE_ENFORCE_EQ(table_width * idx_width, out_width); + + auto check_idx_value_valid = [&](int i) { + PADDLE_ENFORCE_LT(idx[i], table_height, "idx value: %d, i: %d", idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + auto blas = math::GetBlas(context); + + for (int w = 0; w != idx_width; ++w) { + check_idx_value_valid(w); + blas.VCOPY(table_width, table + idx[w] * table_width, + out + w * table_width); + } + + for (int h = 1; h < idx_height; ++h) { + for (int w = 0; w < idx_width; ++w) { + int i = h * idx_width + w; + check_idx_value_valid(i); + blas.AXPY(table_width, static_cast(1), table + idx[i] * table_width, + out + w * table_width); + } + } +} + template struct EmbeddingVSumFunctor { void operator()(const framework::ExecutionContext &context, const LoDTensor *table_t, const LoDTensor *ids_t, LoDTensor *output_t) { auto *table = table_t->data(); - int64_t row_number = table_t->dims()[0]; - int64_t row_width = table_t->dims()[1]; - int64_t last_dim = output_t->dims()[1]; + int64_t table_height = table_t->dims()[0]; + int64_t table_width = table_t->dims()[1]; + int64_t out_width = output_t->dims()[1]; const int64_t *ids = ids_t->data(); auto ids_lod = ids_t->lod()[0]; - int64_t ids_count = ids_t->numel() / ids_lod.back(); - + int64_t idx_width = ids_t->numel() / ids_lod.back(); auto *output = output_t->mutable_data(context.GetPlace()); - auto blas = math::GetBlas(context); + PADDLE_ENFORCE_LE(table_width * idx_width, out_width); for (int64_t i = 0; i != ids_lod.size() - 1; ++i) { - size_t begin = ids_lod[i] * ids_count; - for (int64_t j = 0; j != ids_count; ++j) { - PADDLE_ENFORCE_LT(ids[begin], row_number); - PADDLE_ENFORCE_GE(ids[begin], 0, "ids %d", i); - blas.VCOPY(row_width, table + ids[begin + j] * row_width, - output + i * last_dim + j * row_width); - } - - for (int64_t r = (ids_lod[i] + 1) * ids_count; - r < ids_lod[i + 1] * ids_count; ++r) { - PADDLE_ENFORCE_LT(ids[r], row_number); - PADDLE_ENFORCE_GE(ids[r], 0, "ids %d", i); - blas.AXPY(row_width, 1., table + ids[r] * row_width, - output + i * last_dim + (r % ids_count) * row_width); - } + emb_seqpool(context, table, ids + ids_lod[i] * idx_width, + output + i * out_width, table_height, table_width, + ids_lod[i + 1] - ids_lod[i], idx_width, out_width); } } }; From 11afbe0f538f873b77647e280ee8de5ae35ca790 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 15:27:06 +0800 Subject: [PATCH 02/33] add details. 
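This restructures the reuse loop in memory_optimize_pass.cc: variables in
the skip set now bail out first, the pool is consulted only for reusable
variables, and the pool refill runs after every op instead of only after a
cache hit. A minimal standalone sketch of the new control flow (the
container types and variable names are illustrative stand-ins, not the
real pass):

    // Toy model of the restructured loop: 1) honor the skip set,
    // 2) try to reuse a pooled variable, 3) refill the pool for every op.
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      std::set<std::string> skip_set = {"fetch"};
      std::vector<std::string> pool = {"tmp_0"};
      for (std::string var : {"fetch", "conv_out", "tmp_1"}) {
        if (skip_set.count(var)) continue;  // skipped vars are never reused
        if (!pool.empty()) {
          std::cout << var << " reuses " << pool.back() << "\n";
          pool.pop_back();  // the reused cache var leaves the pool
        }
        pool.push_back(var);  // refill happens unconditionally per op
      }
      return 0;
    }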
test=develop --- .../framework/details/memory_optimize_pass.cc | 85 ++++++++++--------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 41e4a834df..1574d78440 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -69,55 +69,58 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } for (auto& var : op->outputs) { - if (!NodeCanReused(var) || cfg_->Use(op).count(var->Name()) == 0 || - skip_set_.count(var->Name())) + if (skip_set_.count(var->Name())) { + VLOG(3) << "Skip set contains variable of " << var->Name() + << "disable reuse on it. skipped"; continue; - ir::Node* cache = pool_.FindBestFitNode(var); - - if (var->Name() == FLAGS_memory_optimize_debug) { - VLOG(3) << "start match var " << DebugString(var) << " of op " - << op->Name(); - VLOG(3) << pool_.ToString(); - VLOG(3) << "matched in pool : " - << ((cache == nullptr) ? "False" : "True"); } + if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { + ir::Node* cache = pool_.FindBestFitNode(var); + if (var->Name() == FLAGS_memory_optimize_debug) { + VLOG(3) << "start match var " << DebugString(var) << " of op " + << op->Name(); + VLOG(3) << pool_.ToString(); + VLOG(3) << "matched in pool : " + << ((cache == nullptr) ? "False" : "True"); + } - if (cache == nullptr) continue; - if (var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." << var->Name() - << " is re-filled to the pool after" - << "the reused op is finished. Current op can not " - << "replace it again. Skip this candidate."; - continue; - - int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); - VLOG(3) << string::Sprintf( - "!!! %s, %s => %s, cache idx %d, pool size %d", - std::to_string(reuse_id++), DebugString(var), DebugString(cache), - node_idx_in_pool, static_cast(pool_.size())); - - // update CFG Graph on the fly. - // reused var maybe re-fill into the pool - cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); - // NOTE(dzhwinter): we need to both update the ProgramDesc - // and IR Graph. because op_desc/var_desc is used in CreateOp, - // CreateVar when running happens. But IR Graph - // define the dependence relationship between nodes. - RenameVarInGraphDesc(var->Name(), cache->Name(), idx); - RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); + if (cache != nullptr) { + if (var->Name() == cache->Name()) { + VLOG(3) << "The same cache variable is cascade reused." + << var->Name() << " is re-filled to the pool after" + << "the reused op is finished. Current op can not " + << "replace it again. Skip this candidate."; + continue; + } - pool_.Erase(cache); - } + int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); + VLOG(3) << string::Sprintf( + "!!! %s, %s => %s, cache idx %d, pool size %d", + std::to_string(reuse_id++), DebugString(var), DebugString(cache), + node_idx_in_pool, static_cast(pool_.size())); + // NOTE(dzhwinter): update the ProgramDesc/IR Graph + // and the CFG Graph on the fly. + // + // IR Graph define the dependence relationship between nodes. + // + // ProgramDesc defines the input/output vars. Its used in + // CreateOp, CreateVar when running happens. + // + // CFG Graph store the liveness information, when reuse happens + // we also need to update the variable liveness. 
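+          //
+          // The calls below keep all three views consistent: the CFG
+          // liveness is renamed first, then the program desc, then the
+          // IR graph nodes.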
+ cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); + RenameVarInGraphDesc(var->Name(), cache->Name(), idx); + RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); - // fill the pool - std::unordered_set unlived_vars; - for (auto var : cfg_->LiveIn(op)) { - if (cfg_->LiveOut(op).count(var) == 0) { - unlived_vars.emplace(var); + pool_.Erase(cache); } } - for (auto var : unlived_vars) { + } + // fill the pool + for (auto var : cfg_->LiveIn(op)) { + if (cfg_->LiveOut(op).count(var) == 0) { ir::Node* var_node = cfg_->GetNodeByName(var, op); + if (var_node == nullptr) continue; if (NodeCanReused(var_node) && !pool_.Has(var_node)) { pool_.Insert(var_node); } From 6d6ddcfe15f6d6d2be156b469cbb284ce9382646 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 19:39:32 +0800 Subject: [PATCH 03/33] add details. test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 7 ++- .../details/memory_optimize_helper.cc | 52 ++++++++++++++++++- .../details/memory_optimize_helper.h | 1 + .../framework/details/memory_optimize_pass.cc | 15 +++--- 4 files changed, 64 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index e88084424b..5e8ffa4f51 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -50,7 +50,12 @@ cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_ cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper) +if(WITH_GPU) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +else() +nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) +endif() + cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) cc_library(inplace_op_pass SRCS inplace_op_pass.cc DEPS memory_optimize_pass op_info) cc_library(modify_op_lock_and_record_event_pass SRCS modify_op_lock_and_record_event_pass.cc DEPS computation_op_handle op_graph_view multi_devices_helper) diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 6345ba3359..ef2b4131bf 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -13,13 +13,19 @@ // limitations under the License. 
#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include #include #include -#include +#include #include #include #include #include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/platform/cpu_info.h" + +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/gpu_info.h" +#endif // PADDLE_WITH_CUDA namespace paddle { namespace framework { @@ -230,6 +236,27 @@ ir::Node* OrderedSet::FindBestFitNode(ir::Node* var) const { return found_node; } +ir::Node* OrderedSet::FindNextBestFitNode(ir::Node* var, ir::Node* prev) const { + ir::Node* found_node = nullptr; + NodeComparator functor; + auto it = + std::find_if(nodes_.begin(), nodes_.end(), [&](const NodeVector& v) { + if (v.front() == prev) + return true; + else + return false; + }); + PADDLE_ENFORCE(it != nodes_.end(), "Not found previous in node list!"); + for (it = std::next(it); it != nodes_.end(); ++it) { + auto& candidate = it->front(); + if (functor(var, candidate)) { + found_node = candidate; + break; + } + } + return found_node; +} + bool OrderedSet::Has(ir::Node* var) const { if (mark_table_.count(var->Name())) { auto& node_in_samename = mark_table_.at(var->Name()); @@ -274,14 +301,35 @@ bool NodeCanReused(ir::Node* node) { return flag; } +int MinChunkSize() { + int size{0}; +#ifdef PADDLE_WITH_CUDA + size = platform::GpuMinChunkSize(); +#else + size = platform::CpuMinChunkSize(); +#endif // PADDLE_WITH_CUDA + return size; +} + bool NodeCanReused(const VarDesc& node) { auto type = node.GetType(); + // only these types holds bulk of gpu memory if (!(type == proto::VarType::LOD_TENSOR || type == proto::VarType::SELECTED_ROWS || type == proto::VarType::LOD_TENSOR_ARRAY)) { return false; } - if (node.Persistable() || node.GetShape().empty()) { + // persistable variable is parameter + if (node.Persistable()) { + return false; + } + // shape < min_chunk_size is meaningless. + // further more, fetched loss always has size = 1 + // which should not be reused. + auto shape = node.GetShape(); + int size = std::abs( + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies())); + if (shape.empty() || size < MinChunkSize()) { return false; } // vars can be @EMPTY@, @LR_DECAY_REUSE_ID@. For example, while_grad diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index 0bfaf827fe..e17030b2ab 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -62,6 +62,7 @@ class OrderedSet { } // find the bestfit shape node block with var. ir::Node* FindBestFitNode(ir::Node* var) const; + ir::Node* FindNextBestFitNode(ir::Node* var, ir::Node* prev) const; // map store non-const iterator, can not promise const int GetNodeIndexInPool(ir::Node* var); // pool all node to string diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 1574d78440..2f9e2e662b 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -76,6 +76,13 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { ir::Node* cache = pool_.FindBestFitNode(var); + while (cache != nullptr && var->Name() == cache->Name()) { + VLOG(3) << "The same cache variable is cascade reused." << var->Name() + << " is re-filled to the pool after" + << "the reused op is finished. 
Current op can not " + << "replace it again. Skip this candidate."; + cache = pool_.FindNextBestFitNode(var, cache); + } if (var->Name() == FLAGS_memory_optimize_debug) { VLOG(3) << "start match var " << DebugString(var) << " of op " << op->Name(); @@ -85,14 +92,6 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } if (cache != nullptr) { - if (var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." - << var->Name() << " is re-filled to the pool after" - << "the reused op is finished. Current op can not " - << "replace it again. Skip this candidate."; - continue; - } - int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); VLOG(3) << string::Sprintf( "!!! %s, %s => %s, cache idx %d, pool size %d", From 51d1e8cd065001a0ef96a81da748760c0b1b8e14 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 20:04:54 +0800 Subject: [PATCH 04/33] add details. test=develop --- python/paddle/fluid/compiler.py | 5 ++++- python/paddle/fluid/parallel_executor.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index ef02429428..7c8c4a7e06 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -177,7 +177,10 @@ class CompiledProgram(object): # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. - self._build_strategy.enable_inplace = False if self._program._is_mem_optimized else True + if build_strategy.memory_optimize is None: + build_strategy.memory_optimize = False if main._is_mem_optimized else True + if build_strategy.enable_inplace is None: + build_strategy.enable_inplace = False if main._is_mem_optimized else True if self._build_strategy.num_trainers > 1 and trainers_endpoints: assert self._build_strategy.num_trainers == len( diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 22212ae9a2..8586670c24 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -148,6 +148,8 @@ class ParallelExecutor(object): else framework.default_main_program() # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. + if build_strategy.memory_optimize is None: + build_strategy.memory_optimize = False if main._is_mem_optimized else True if build_strategy.enable_inplace is None: build_strategy.enable_inplace = False if main._is_mem_optimized else True scope = scope if scope is not None else executor.global_scope() From 283573c6aa8d3e6d6f72c6f68c11b553095d64bc Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Feb 2019 10:36:55 +0800 Subject: [PATCH 05/33] add details. 
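This fixes three slips from the previous commits: the gpu_info/cpu_info
dependencies of memory_optimize_helper were attached to the wrong WITH_GPU
branches in details/CMakeLists.txt, the inplace whitelist flag's help text
claimed the wrong default (the flag is off by default), and compiler.py
must read the strategy through self._build_strategy. These strategy
options are tri-state; a hedged C++ sketch of the default-resolution rule
used on the Python side (the function and parameter names are invented
for illustration):

    // Unset (None) means "decide from whether python-side memory
    // optimization already ran"; an explicit value always wins.
    #include <iostream>
    #include <optional>

    bool ResolveFlag(std::optional<bool> flag, bool program_is_mem_optimized) {
      if (!flag.has_value()) return !program_is_mem_optimized;
      return *flag;
    }

    int main() {
      std::cout << ResolveFlag(std::nullopt, true) << "\n";   // 0: stay off
      std::cout << ResolveFlag(std::nullopt, false) << "\n";  // 1: default on
      std::cout << ResolveFlag(false, false) << "\n";         // 0: explicit off
      return 0;
    }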
test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 4 ++-- paddle/fluid/framework/details/inplace_op_pass.cc | 2 +- python/paddle/fluid/compiler.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 5e8ffa4f51..6b1957ae59 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -51,9 +51,9 @@ cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope d cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) if(WITH_GPU) -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) else() -nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) +nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) endif() cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/details/inplace_op_pass.cc index b0c5968499..c91fc81b2d 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/details/inplace_op_pass.cc @@ -49,7 +49,7 @@ DEFINE_bool( "If this option turns on, only these op in whitelist can be inplaced." "If it turns off, all of the running op can be candidate of inplaced op." "Such as scale, elementwise_add" - "By default, it's turned on"); + "By default, it's turned off"); DECLARE_string(memory_optimize_debug); diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 7c8c4a7e06..b24cec044f 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -177,10 +177,10 @@ class CompiledProgram(object): # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. 
- if build_strategy.memory_optimize is None: - build_strategy.memory_optimize = False if main._is_mem_optimized else True - if build_strategy.enable_inplace is None: - build_strategy.enable_inplace = False if main._is_mem_optimized else True + if self._build_strategy.memory_optimize is None: + self._build_strategy.memory_optimize = False if main._is_mem_optimized else True + if self._build_strategy.enable_inplace is None: + self._build_strategy.enable_inplace = False if main._is_mem_optimized else True if self._build_strategy.num_trainers > 1 and trainers_endpoints: assert self._build_strategy.num_trainers == len( From fe7ffedc1a45a29e02ee259ba7a1781f3a2903d0 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Thu, 14 Feb 2019 12:02:53 +0000 Subject: [PATCH 06/33] test=develop, update protobuf --- cmake/external/protobuf.cmake | 4 ++-- python/requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index e05b7694dd..3da3f10d7c 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -203,7 +203,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ENDIF() SET(PROTOBUF_REPO "https://github.com/google/protobuf.git") - SET(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546") + SET(PROTOBUF_TAG "v3.6.1") ExternalProject_Add( ${TARGET_NAME} @@ -231,7 +231,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ) ENDFUNCTION() -SET(PROTOBUF_VERSION 3.1) +SET(PROTOBUF_VERSION 3.6.1) IF(NOT PROTOBUF_FOUND) build_protobuf(extern_protobuf FALSE) diff --git a/python/requirements.txt b/python/requirements.txt index 5a70f1aa3f..6cbda1db54 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,6 +1,6 @@ requests==2.9.2 numpy>=1.12 -protobuf==3.1 +protobuf>=3.6 recordio>=0.1.0 matplotlib==2.2.3 # TODO: let python3 paddlepaddle package use latest matplotlib rarfile From 15da2f9a0d555edbddacb3e5f4c747f1059602df Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 13 Feb 2019 14:00:31 +0000 Subject: [PATCH 07/33] add embseqpool jitkernel refer code, test and benchmark test=develop --- paddle/fluid/operators/jit/benchmark.cc | 36 ++++++++++ paddle/fluid/operators/jit/helper.cc | 1 + paddle/fluid/operators/jit/helper.h | 9 +++ paddle/fluid/operators/jit/kernel_base.h | 66 +++++++++++++------ paddle/fluid/operators/jit/kernel_key.cc | 5 ++ .../fluid/operators/jit/refer/CMakeLists.txt | 1 + paddle/fluid/operators/jit/refer/refer.cc | 2 + paddle/fluid/operators/jit/refer/refer.h | 34 ++++++++++ paddle/fluid/operators/jit/test.cc | 65 ++++++++++++++++++ 9 files changed, 200 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 97ddf223ae..9831b6ef92 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -301,6 +301,37 @@ void BenchSeqPoolKernel() { } } +template +void BenchEmbSeqPoolKernel() { + std::vector pool_types = {jit::SeqPoolType::kSum}; + int64_t tbl_h = 1e4; + for (int tbl_w : {10, 16, 256}) { + Tensor table; + table.Resize({tbl_h, tbl_w}); + RandomVec(tbl_h * tbl_w, table.mutable_data(PlaceType()), -2.f, 2.f); + const T* table_data = table.data(); + for (auto type : pool_types) { + for (int idx_w : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 10, 16}) { + int64_t out_w = tbl_w * idx_w; + jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, + type); + Tensor idx, out; + idx.Resize({idx_h, idx_w}); + out.Resize({out_w}); + 
RandomVec(idx_h * idx_w, + idx.mutable_data(PlaceType()), 0, + tbl_h - 1); + const int64_t* idx_data = idx.data(); + T* o_data = out.mutable_data(PlaceType()); + BenchAllImpls, PlaceType>( + attr, table_data, idx_data, o_data, &attr); + } + } + } + } +} + template void BenchMatMulKernel() { for (int m : {1, 2, 3, 4}) { @@ -376,6 +407,11 @@ BENCH_FP32_CPU(kGRUHtPart2) { BenchGRUKernel(); } // seq pool function BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } +// embedding seq pool function +BENCH_FP32_CPU(kEmbSeqPool) { + BenchEmbSeqPoolKernel(); +} + // matmul BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } diff --git a/paddle/fluid/operators/jit/helper.cc b/paddle/fluid/operators/jit/helper.cc index e7292fe2bd..a766536132 100644 --- a/paddle/fluid/operators/jit/helper.cc +++ b/paddle/fluid/operators/jit/helper.cc @@ -54,6 +54,7 @@ const char* to_string(KernelType kt) { ONE_CASE(kHMax); ONE_CASE(kHSum); ONE_CASE(kSoftmax); + ONE_CASE(kEmbSeqPool); default: PADDLE_THROW("Not support type: %d, or forget to add it.", kt); return "NOT JITKernel"; diff --git a/paddle/fluid/operators/jit/helper.h b/paddle/fluid/operators/jit/helper.h index d5773d6594..07998588a5 100644 --- a/paddle/fluid/operators/jit/helper.h +++ b/paddle/fluid/operators/jit/helper.h @@ -172,6 +172,15 @@ inline std::ostream& operator<<(std::ostream& os, const seq_pool_attr_t& attr) { return os; } +inline std::ostream& operator<<(std::ostream& os, + const emb_seq_pool_attr_t& attr) { + os << "table_height[" << attr.table_height << "],table_width[" + << attr.table_width << "],index_height[" << attr.index_height + << "],index_width[" << attr.index_width << "],output_width[" + << attr.out_width << "],pool_type[" << to_string(attr.pool_type) << "]"; + return os; +} + inline std::ostream& operator<<(std::ostream& os, const matmul_attr_t& attr) { os << "M[" << attr.m << "],N[" << attr.n << "],K[" << attr.k << "]"; return os; diff --git a/paddle/fluid/operators/jit/kernel_base.h b/paddle/fluid/operators/jit/kernel_base.h index 4a8f61146a..20b6a32bef 100644 --- a/paddle/fluid/operators/jit/kernel_base.h +++ b/paddle/fluid/operators/jit/kernel_base.h @@ -13,6 +13,7 @@ * limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/operators/jit/macro.h" #include "paddle/fluid/platform/macros.h" @@ -20,34 +21,35 @@ namespace paddle { namespace operators { namespace jit { -// TODO(TJ): reorder by alphabet typedef enum { kNone = 0, - kVMul = 1, - kVAdd = 2, - kVAddRelu, - kVSub, - kVScal, - kVAddBias, - kVRelu, - kVIdentity, - kVSquare, - kVExp, - kVSigmoid, - kVTanh, - kLSTMCtHt, - kLSTMC1H1, + // sort by alphabet + kCRFDecoding = 1, + kEmbSeqPool = 2, kGRUH1, kGRUHtPart1, kGRUHtPart2, - kCRFDecoding, + kHSum, // horizontal max + kHMax, // horizontal sum + kLSTMCtHt, + kLSTMC1H1, kLayerNorm, + kMatMul, kNCHW16CMulNC, kSeqPool, - kMatMul, - kHSum, // horizontal max - kHMax, // horizontal sum kSoftmax, + kVAdd, + kVAddBias, + kVAddRelu, + kVExp, + kVIdentity, + kVMul, + kVRelu, + kVScal, + kVSigmoid, + kVSquare, + kVSub, + kVTanh, } KernelType; typedef enum { @@ -145,6 +147,32 @@ struct SeqPoolTuples { typedef void (*func_type)(const T*, T*, const seq_pool_attr_t*); }; +typedef struct emb_seq_pool_attr_s { + int64_t table_height, table_width; + int64_t index_height, index_width; + int64_t out_width; + SeqPoolType pool_type; + emb_seq_pool_attr_s() = default; + explicit emb_seq_pool_attr_s(int64_t tbl_height, int64_t tbl_width, + int64_t idx_height, int64_t idx_width, + int64_t output_width, + SeqPoolType seqpool_type = SeqPoolType::kSum) + : table_height(tbl_height), + table_width(tbl_width), + index_height(idx_height), + index_width(idx_width), + out_width(output_width), + pool_type(seqpool_type) {} +} emb_seq_pool_attr_t; + +template +struct EmbSeqPoolTuples { + typedef T data_type; + typedef emb_seq_pool_attr_t attr_type; + typedef void (*func_type)(const T*, const int64_t*, T*, + const emb_seq_pool_attr_t*); +}; + typedef struct matmul_attr_s { int m, n, k; void* packed_weight{nullptr}; diff --git a/paddle/fluid/operators/jit/kernel_key.cc b/paddle/fluid/operators/jit/kernel_key.cc index 1e4a8884e7..e659c6d254 100644 --- a/paddle/fluid/operators/jit/kernel_key.cc +++ b/paddle/fluid/operators/jit/kernel_key.cc @@ -56,6 +56,11 @@ size_t JitCodeKey(const matmul_attr_t& attr) { return (key << shift * 2) + ((static_cast(attr.n)) << shift) + attr.k; } +template <> +size_t JitCodeKey(const emb_seq_pool_attr_t& attr) { + return attr.table_width; +} + } // namespace jit } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/jit/refer/CMakeLists.txt b/paddle/fluid/operators/jit/refer/CMakeLists.txt index 9f2935828c..218d801c08 100644 --- a/paddle/fluid/operators/jit/refer/CMakeLists.txt +++ b/paddle/fluid/operators/jit/refer/CMakeLists.txt @@ -32,3 +32,4 @@ USE_JITKERNEL_REFER(kVSquare) USE_JITKERNEL_REFER(kHSum) USE_JITKERNEL_REFER(kHMax) USE_JITKERNEL_REFER(kSoftmax) +USE_JITKERNEL_REFER(kEmbSeqPool) diff --git a/paddle/fluid/operators/jit/refer/refer.cc b/paddle/fluid/operators/jit/refer/refer.cc index b8adb40ec7..7e7dd6960b 100644 --- a/paddle/fluid/operators/jit/refer/refer.cc +++ b/paddle/fluid/operators/jit/refer/refer.cc @@ -57,4 +57,6 @@ REGISTER_REFER_KERNEL(kHSum, HSum); REGISTER_REFER_KERNEL(kSoftmax, Softmax); +REGISTER_REFER_KERNEL(kEmbSeqPool, EmbSeqPool); + #undef REGISTER_REFER_KERNEL diff --git a/paddle/fluid/operators/jit/refer/refer.h b/paddle/fluid/operators/jit/refer/refer.h index 0c4a985f8e..fd1193aa41 100644 --- a/paddle/fluid/operators/jit/refer/refer.h +++ b/paddle/fluid/operators/jit/refer/refer.h @@ -16,6 +16,7 @@ #include #include +#include #include "paddle/fluid/operators/jit/helper.h" #include 
"paddle/fluid/operators/jit/kernel_base.h" #include "paddle/fluid/platform/enforce.h" @@ -414,6 +415,37 @@ void Softmax(const T* x, T* y, int n, int bs = 1) { } } +// embedding seq pool +// table is a matrix with (tbl_h, tbl_w) +// idx is a matrix with (idx_h, idx_w) +// output is a vector with length tbl_w * idx_w +template +void EmbSeqPool(const T* table, const int64_t* idx, T* out, + const emb_seq_pool_attr_t* attr) { + PADDLE_ENFORCE_EQ(attr->table_width * attr->index_width, attr->out_width); + + auto check_idx_value_valid = [&](int64_t i) { + PADDLE_ENFORCE_LT(idx[i], attr->table_height, "idx value: %d, i: %d", + idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + + for (int64_t w = 0; w != attr->index_width; ++w) { + check_idx_value_valid(w); + std::memcpy(out + w * attr->table_width, table + idx[w] * attr->table_width, + attr->table_width * sizeof(T)); + } + + for (int64_t h = 1; h < attr->index_height; ++h) { + for (int64_t w = 0; w < attr->index_width; ++w) { + int64_t i = h * attr->index_width + w; + check_idx_value_valid(i); + VAdd(table + idx[i] * attr->table_width, out + w * attr->table_width, + out + w * attr->table_width, attr->table_width); + } + } +} + #define DECLARE_REFER_KERNEL(name, tuples) \ template \ class name##Kernel : public ReferKernel> { \ @@ -462,6 +494,8 @@ DECLARE_REFER_KERNEL(HSum, XRNTuples); DECLARE_REFER_KERNEL(Softmax, SoftmaxTuples); +DECLARE_REFER_KERNEL(EmbSeqPool, EmbSeqPoolTuples); + #undef DECLARE_REFER_KERNEL } // namespace refer diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 237e588d35..c35b6aef23 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -270,6 +270,32 @@ struct TestFuncWithRefer, std::vector, std::vector, } }; +template +struct TestFuncWithRefer, std::vector, + std::vector, std::vector, + typename jit::EmbSeqPoolTuples::attr_type> { + void operator()(const typename jit::EmbSeqPoolTuples::func_type tgt, + const std::vector& table, const std::vector& idx, + const std::vector& oref, + const typename jit::EmbSeqPoolTuples::attr_type& attr) { + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(table.size(), + static_cast(attr.table_height * attr.table_width)); + EXPECT_EQ(idx.size(), + static_cast(attr.index_height * attr.index_width)); + EXPECT_EQ(oref.size(), + static_cast(attr.table_width * attr.index_width)); + const T* table_data = table.data(); + const int64_t* idx_data = idx.data(); + const T* oref_data = oref.data(); + int o_w = oref.size(); + std::vector out(o_w); + T* o_data = out.data(); + tgt(table_data, idx_data, o_data, &attr); + ExpectEQ(o_data, oref_data, o_w); + } +}; + template struct TestFuncWithRefer, std::vector, std::vector, std::vector, @@ -587,6 +613,40 @@ void TestSoftmaxKernel() { } } +template +void TestEmbSeqPoolKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + int64_t tbl_h = 1e4; + std::vector pool_types = { + jit::SeqPoolType::kSum}; // only support sum yet + for (int tbl_w : TestSizes()) { + std::vector table(tbl_h * tbl_w); + RandomVec(tbl_h * tbl_w, table.data(), -2.f, 2.f); + const T* table_data = table.data(); + for (auto type : pool_types) { + for (int idx_w : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 10, 16}) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector idx(idx_h * idx_w); + RandomVec(idx_h * idx_w, idx.data(), 0, tbl_h - 1); + int64_t out_w = tbl_w * idx_w; + std::vector oref(out_w); + const int64_t* idx_data = idx.data(); + T* o_data 
= oref.data(); + jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, + type); + ref(table_data, idx_data, o_data, &attr); + + TestAllImpls, PlaceType, std::vector, + std::vector, std::vector>(attr, table, idx, + oref, attr); + } + } + } + } +} + template void TestNCHW16CMulNCKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); @@ -756,6 +816,11 @@ TEST(JITKernel, kSoftmax) { TestSoftmaxKernel(); } +TEST(JITKernel, kEmbSeqPool) { + TestEmbSeqPoolKernel(); + TestEmbSeqPoolKernel(); +} + TEST(JITKernel, kNCHW16CMulNC) { TestNCHW16CMulNCKernel(); TestNCHW16CMulNCKernel(); From a3a3d3d8613c729dccb76aa066948c523c35c7e2 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 14 Feb 2019 14:38:41 +0000 Subject: [PATCH 08/33] add embseqpool jitkernel mkl impl and use it test=develop --- .../fused/fused_embedding_seq_pool_op.h | 41 ++++--------------- .../operators/jit/more/mkl/CMakeLists.txt | 1 + paddle/fluid/operators/jit/more/mkl/mkl.cc | 11 +++++ paddle/fluid/operators/jit/more/mkl/mkl.h | 29 +++++++++++++ 4 files changed, 50 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 744e83541d..92345b3c0e 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -21,6 +21,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { @@ -31,35 +32,6 @@ using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; using DDim = framework::DDim; -template -void emb_seqpool(const framework::ExecutionContext &context, const T *table, - const int64_t *idx, T *out, int64_t table_height, - int64_t table_width, int64_t idx_height, int64_t idx_width, - int64_t out_width) { // pool type == sum - PADDLE_ENFORCE_EQ(table_width * idx_width, out_width); - - auto check_idx_value_valid = [&](int i) { - PADDLE_ENFORCE_LT(idx[i], table_height, "idx value: %d, i: %d", idx[i], i); - PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); - }; - auto blas = math::GetBlas(context); - - for (int w = 0; w != idx_width; ++w) { - check_idx_value_valid(w); - blas.VCOPY(table_width, table + idx[w] * table_width, - out + w * table_width); - } - - for (int h = 1; h < idx_height; ++h) { - for (int w = 0; w < idx_width; ++w) { - int i = h * idx_width + w; - check_idx_value_valid(i); - blas.AXPY(table_width, static_cast(1), table + idx[i] * table_width, - out + w * table_width); - } - } -} - template struct EmbeddingVSumFunctor { void operator()(const framework::ExecutionContext &context, @@ -75,10 +47,15 @@ struct EmbeddingVSumFunctor { auto *output = output_t->mutable_data(context.GetPlace()); PADDLE_ENFORCE_LE(table_width * idx_width, out_width); + + jit::emb_seq_pool_attr_t attr(table_height, table_width, 0, idx_width, + out_width, jit::SeqPoolType::kSum); for (int64_t i = 0; i != ids_lod.size() - 1; ++i) { - emb_seqpool(context, table, ids + ids_lod[i] * idx_width, - output + i * out_width, table_height, table_width, - ids_lod[i + 1] - ids_lod[i], idx_width, out_width); + attr.index_height = ids_lod[i + 1] - ids_lod[i]; + auto emb_seqpool = jit::Get, + platform::CPUPlace>(attr); + emb_seqpool(table, ids + ids_lod[i] * idx_width, output + i * 
out_width, + &attr); } } }; diff --git a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt index f9e5aea32e..d209f31007 100644 --- a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt @@ -13,3 +13,4 @@ USE_JITKERNEL_MORE(kVSigmoid, mkl) USE_JITKERNEL_MORE(kVTanh, mkl) USE_JITKERNEL_MORE(kSeqPool, mkl) USE_JITKERNEL_MORE(kSoftmax, mkl) +USE_JITKERNEL_MORE(kEmbSeqPool, mkl) diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc index 4c999131ab..29a451f832 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.cc +++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc @@ -174,6 +174,16 @@ bool SeqPoolKernel::UseMe(const seq_pool_attr_t& attr) const { return true; } +template <> +bool EmbSeqPoolKernel::UseMe(const emb_seq_pool_attr_t& attr) const { + return true; +} + +template <> +bool EmbSeqPoolKernel::UseMe(const emb_seq_pool_attr_t& attr) const { + return true; +} + template <> bool MatMulKernel::UseMe(const matmul_attr_t& attr) const { return platform::MayIUse(platform::avx); @@ -227,6 +237,7 @@ REGISTER_MKL_KERNEL(kVSquare, VSquare); REGISTER_MKL_KERNEL(kVSigmoid, VSigmoid); REGISTER_MKL_KERNEL(kVTanh, VTanh); REGISTER_MKL_KERNEL(kSeqPool, SeqPool); +REGISTER_MKL_KERNEL(kEmbSeqPool, EmbSeqPool); REGISTER_MKL_KERNEL(kSoftmax, Softmax); #undef REGISTER_MKL_KERNEL diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.h b/paddle/fluid/operators/jit/more/mkl/mkl.h index 8130b87326..9a72ba8302 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.h +++ b/paddle/fluid/operators/jit/more/mkl/mkl.h @@ -18,6 +18,7 @@ #include #include #include "paddle/fluid/operators/jit/kernel_base.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { @@ -91,6 +92,32 @@ void SeqPool(const T* x, T* y, const seq_pool_attr_t* attr) { } } +template +void EmbSeqPool(const T* table, const int64_t* idx, T* out, + const emb_seq_pool_attr_t* attr) { + PADDLE_ENFORCE_EQ(attr->table_width * attr->index_width, attr->out_width); + auto check_idx_value_valid = [&](int64_t i) { + PADDLE_ENFORCE_LT(idx[i], attr->table_height, "idx value: %d, i: %d", + idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + + for (int64_t w = 0; w != attr->index_width; ++w) { + check_idx_value_valid(w); + VCopy(table + idx[w] * attr->table_width, out + w * attr->table_width, + attr->table_width); + } + + for (int64_t h = 1; h < attr->index_height; ++h) { + for (int64_t w = 0; w < attr->index_width; ++w) { + int64_t i = h * attr->index_width + w; + check_idx_value_valid(i); + VAXPY(static_cast(1), table + idx[i] * attr->table_width, + out + w * attr->table_width, attr->table_width); + } + } +} + template void ASum(const T* x, T* res, int n); @@ -142,6 +169,8 @@ DECLARE_MKL_KERNEL(VSquare, XYNTuples); DECLARE_MKL_KERNEL(SeqPool, SeqPoolTuples); +DECLARE_MKL_KERNEL(EmbSeqPool, EmbSeqPoolTuples); + DECLARE_MKL_KERNEL(Softmax, SoftmaxTuples); #undef DECLARE_MKL_KERNEL From b6085526f34db0bb447c8b43c6b04ab49ac7bdfa Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 15 Feb 2019 08:07:04 +0000 Subject: [PATCH 09/33] test=develop, update protobuf in Dockerfile used by CI --- tools/manylinux1/Dockerfile.x64 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 index 48fd145e5f..c2fd743f62 100644 --- a/tools/manylinux1/Dockerfile.x64 +++ 
b/tools/manylinux1/Dockerfile.x64 @@ -31,10 +31,10 @@ RUN wget --no-check-certificate -qO- https://storage.googleapis.com/golang/go1.8 ENV GOROOT=/usr/local/go GOPATH=/root/gopath ENV PATH=${GOROOT}/bin:${GOPATH}/bin:${PATH} -# protobuf 3.1.0 -RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf/releases/download/v3.1.0/protobuf-cpp-3.1.0.tar.gz && \ - tar xzf protobuf-cpp-3.1.0.tar.gz && \ - cd protobuf-3.1.0 && ./configure && make -j4 && make install && cd .. && rm -f protobuf-cpp-3.1.0.tar.gz +# protobuf 3.6.1 +RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz && \ + tar xzf protobuf-cpp-3.6.1.tar.gz && \ + cd protobuf-3.6.1 && ./configure && make -j4 && make install && cd .. && rm -f protobuf-cpp-3.6.1.tar.gz RUN wget https://raw.githubusercontent.com/PaddlePaddle/Paddle/develop/python/requirements.txt -O /root/requirements.txt From 48cf979a2138a3267224a1d86c65cd1db62068c3 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 15 Feb 2019 09:49:58 +0000 Subject: [PATCH 10/33] test=develop, install requirements before start for Linux --- cmake/external/python.cmake | 4 ++-- paddle/scripts/paddle_build.sh | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 623c53f4f7..351e7fa3ce 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -74,8 +74,8 @@ IF(PYTHONINTERP_FOUND) find_python_module(wheel REQUIRED) find_python_module(google.protobuf REQUIRED) FIND_PACKAGE(NumPy REQUIRED) - IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.0.0") - MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.0.0, " + IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.6.1") + MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.6.1, " "please use pip to upgrade protobuf. 
pip install -U protobuf") ENDIF() ENDIF(PYTHONINTERP_FOUND) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 1135caf4f8..bb24ada223 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -128,30 +128,35 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp27-cp27mu" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp35-cp35m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.5.1/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.5.1/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.5.1/include/python3.5m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.5.1/lib/libpython3.so" + pip3.5 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp36-cp36m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.6.0/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.6.0/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.6.0/include/python3.6m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.6.0/lib/libpython3.so" + pip3.6 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp37-cp37m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.7.0/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.so" + pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi fi fi From d376cf71b743b65dd4fc21edd3a634f69148a3eb Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:13:16 +0800 Subject: [PATCH 11/33] polish code for reading. 
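The main change teaches the pass to walk past a same-named cache entry
instead of giving up: OrderedSet gains FindNextBestFitNode, the cascade
check becomes a loop, and renaming now reuses the existing cache node and
erases stale nodes eagerly. A standalone sketch of the new candidate walk
(the vector-of-strings pool is a stand-in for the real OrderedSet, which
also checks shape fitness):

    // Keep scanning the ordered pool until a candidate with a different
    // name (or nothing) is found, mirroring FindNextBestFitNode's role.
    #include <iostream>
    #include <string>
    #include <vector>

    const std::string* FindReusable(const std::vector<std::string>& pool,
                                    const std::string& var) {
      for (const std::string& candidate : pool) {
        if (candidate != var) return &candidate;  // first fit, different name
      }
      return nullptr;  // nothing left to reuse
    }

    int main() {
      std::vector<std::string> pool = {"fc_out", "fc_out", "relu_out"};
      const std::string* hit = FindReusable(pool, "fc_out");
      std::cout << (hit ? *hit : "none") << "\n";  // prints relu_out
      return 0;
    }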
test=develop --- .../fluid/framework/details/build_strategy.cc | 2 + .../details/memory_optimize_helper.cc | 15 ++++-- .../details/memory_optimize_helper.h | 1 + .../details/memory_optimize_helper_test.cc | 46 +++++++++++++++++++ .../framework/details/memory_optimize_pass.cc | 38 ++++++++------- .../unittests/parallel_executor_test_base.py | 2 +- .../test_ir_memory_optimize_transformer.py | 46 +++++++++++++++++++ 7 files changed, 128 insertions(+), 22 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index f8030c53f7..0c823b9ca2 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -240,7 +240,9 @@ std::unique_ptr BuildStrategy::Apply( continue; } } + VLOG(3) << "Start Apply Pass " << pass->Type(); graph = pass->Apply(std::move(graph)); + VLOG(3) << "Finish Apply Pass " << pass->Type(); } return graph; } diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index ef2b4131bf..33c2186067 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -268,10 +268,15 @@ bool OrderedSet::Has(ir::Node* var) const { return false; } +void OrderedSet::Erase(const std::string& var) { + PADDLE_ENFORCE(mark_table_.count(var)); + nodes_.erase(mark_table_[var]); + mark_table_.erase(var); +} + void OrderedSet::Erase(ir::Node* var) { - PADDLE_ENFORCE(mark_table_.count(var->Name())); - nodes_.erase(mark_table_[var->Name()]); - mark_table_.erase(var->Name()); + PADDLE_ENFORCE(var != nullptr); + Erase(var->Name()); } std::string OrderedSet::ToString() const { @@ -509,7 +514,9 @@ ir::Node* ControlFlowGraph::GetNodeByName(const std::string& name, for (auto* node : ops_) { if (node == op) break; for (auto& output : node->outputs) { - if (output->Name() == name) { + PADDLE_ENFORCE((output != nullptr && output->IsVar()), + "Output is empty!"); + if (output->Var() && output->Name() == name) { found_node = output; } } diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index e17030b2ab..dba96309fd 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -55,6 +55,7 @@ class OrderedSet { void Insert(ir::Node* var); void Erase(ir::Node* var); + void Erase(const std::string& var); bool Has(ir::Node* var) const; void Clear() { mark_table_.clear(); diff --git a/paddle/fluid/framework/details/memory_optimize_helper_test.cc b/paddle/fluid/framework/details/memory_optimize_helper_test.cc index 5c13dda9e5..3cfe297a73 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper_test.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper_test.cc @@ -107,6 +107,52 @@ TEST(OrderedSet, Normal) { ASSERT_EQ(pool.GetNodeIndexInPool(cache), 5); // match 4:[5,2] } } + +TEST(OrderedSet, FindBestFitNode) { + OrderedSet pool; + std::vector> nodes; + ProgramDesc prog; + BlockDesc* block_desc = prog.MutableBlock(0); + auto* op_desc = block_desc->AppendOp(); + op_desc->SetType("dummy"); + std::unique_ptr op = ir::CreateNodeForTest(op_desc); + + { + auto desc = block_desc->Var("a"); + desc->SetShape({128, 128}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + 
node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + { + auto desc = block_desc->Var("b"); + desc->SetShape({128, 129}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + { + auto desc = block_desc->Var("c"); + desc->SetShape({128, 128}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + + for (auto& node : nodes) { + pool.Insert(node.get()); + } + + // FindNextBestFitNode + auto* n = nodes[0].get(); + auto* cache = pool.FindBestFitNode(n); + PADDLE_ENFORCE(cache->Name() == "a"); + cache = pool.FindNextBestFitNode(n, cache); + PADDLE_ENFORCE(cache->Name() == "c"); + cache = pool.FindNextBestFitNode(n, cache); + PADDLE_ENFORCE(cache->Name() == "b"); +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 2f9e2e662b..c426059a6a 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -69,7 +69,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } for (auto& var : op->outputs) { - if (skip_set_.count(var->Name())) { + if (var->IsVar() && !var->IsCtrlVar() && skip_set_.count(var->Name())) { VLOG(3) << "Skip set contains variable of " << var->Name() << "disable reuse on it. skipped"; continue; @@ -77,8 +77,8 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { ir::Node* cache = pool_.FindBestFitNode(var); while (cache != nullptr && var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." << var->Name() - << " is re-filled to the pool after" + VLOG(3) << "The same cache variable is cascade reused. " + << var->Name() << " is re-filled to the pool after" << "the reused op is finished. Current op can not " << "replace it again. Skip this candidate."; cache = pool_.FindNextBestFitNode(var, cache); @@ -107,11 +107,13 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( // // CFG Graph store the liveness information, when reuse happens // we also need to update the variable liveness. 
- cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); - RenameVarInGraphDesc(var->Name(), cache->Name(), idx); - RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); + const std::string var_name = var->Name(); + const std::string cache_name = cache->Name(); - pool_.Erase(cache); + cfg_->RenameVarInCFGGraph(var_name, cache_name, idx); + RenameVarInGraphDesc(var_name, cache_name, idx); + RenameVarInGraphNode(var_name, cache_name, idx, graph.get()); + pool_.Erase(cache_name); } } } @@ -119,7 +121,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( for (auto var : cfg_->LiveIn(op)) { if (cfg_->LiveOut(op).count(var) == 0) { ir::Node* var_node = cfg_->GetNodeByName(var, op); - if (var_node == nullptr) continue; + if (var_node == nullptr || var_node->IsCtrlVar()) continue; if (NodeCanReused(var_node) && !pool_.Has(var_node)) { pool_.Insert(var_node); } @@ -275,8 +277,7 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, // redirect the input to the latest version of cache_var for (auto* node : op->inputs) { if (node->Name() == var) { - ir::Node* cache_node = graph->CreateVarNode(var_desc.get()); - var_nodes_[cache_var].emplace_back(cache_node); + ir::Node* cache_node = var_nodes_[cache_var].back(); // swap node to cache_node cache_node->outputs.insert(cache_node->outputs.end(), @@ -285,11 +286,15 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, auto* prev_op = node->inputs[0]; std::replace(prev_op->outputs.begin(), prev_op->outputs.end(), node, cache_node); - cache_node->inputs.emplace_back(prev_op); for (auto* next_op : node->outputs) { std::replace(next_op->inputs.begin(), next_op->inputs.end(), node, cache_node); } + + // erase unused node + auto& nodes = var_nodes_.at(var); + nodes.erase(std::remove(nodes.begin(), nodes.end(), node), nodes.end()); + graph->RemoveNode(node); } } @@ -309,15 +314,14 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, std::replace(next_op->inputs.begin(), next_op->inputs.end(), node, cache_node); } + + // erase unused node + auto& nodes = var_nodes_.at(var); + nodes.erase(std::remove(nodes.begin(), nodes.end(), node), nodes.end()); + graph->RemoveNode(node); } } } - - // release node of unused var in graph - for (auto* node : var_nodes_[var]) { - graph->RemoveNode(node); - } - var_nodes_.at(var).clear(); } } // namespace details diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index c429c8af7d..a94487e67d 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -79,7 +79,7 @@ class TestParallelExecutorBase(unittest.TestCase): if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops build_strategy.fuse_relu_depthwise_conv = fuse_relu_depthwise_conv - build_strategy.memory_optimize = use_ir_memory_optimize + build_strategy.memory_optimize = False if memory_opt else use_ir_memory_optimize # python memory optimization is conflict with inplace pass. # Use ir graph memory optimization after inplace pass is the correct way. 
build_strategy.enable_inplace = False if memory_opt else enable_inplace diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py new file mode 100644 index 0000000000..d34ce44d7c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" +os.environ['FLAGS_fast_eager_deletion_mode'] = True + +os.environ[ + 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' + +from test_parallel_executor_transformer import TestTransformer + + +# NOTE(dzhwinter): test diferent strategy colisions. +# open the eager delete tensor strategy by default. +class TestTransformerWithIR(TestTransformer): + def test_main(self): + if core.is_compiled_with_cuda(): + # check python transpiler + self.check_network_convergence( + transformer, + use_cuda=True, + memory_opt=True, + use_ir_memory_optimize=False) + # check IR memory optimize + self.check_network_convergence( + transformer, + use_cuda=True, + memory_opt=False, + use_ir_memory_optimize=True) + + +if __name__ == '__main__': + unittest.main() From d0a2a202d03d79daad60ac82dde5de74f72368f1 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:33:50 +0800 Subject: [PATCH 12/33] polish code for reading. test=develop --- .../tests/unittests/test_ir_memory_optimize_transformer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py index d34ce44d7c..f32e1161ad 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -14,9 +14,10 @@ import os import unittest -os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" -os.environ['FLAGS_fast_eager_deletion_mode'] = True +import paddle.fluid as fluid +import paddle.fluid.core as core +os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" os.environ[ 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' From 6deac40724995e04039f1fda19b7ea037bf1597c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:41:26 +0800 Subject: [PATCH 13/33] polish code for reading. 
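The new transformer test passes the transformer network builder to
check_network_convergence but only imported TestTransformer, so running it
raised a NameError; import the builder alongside the test class.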
test=develop --- .../fluid/tests/unittests/test_ir_memory_optimize_transformer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py index f32e1161ad..c0f480e34d 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -22,6 +22,7 @@ os.environ[ 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' from test_parallel_executor_transformer import TestTransformer +from test_parallel_executor_transformer import transformer # NOTE(dzhwinter): test diferent strategy colisions. From 3787e61fcaada5f5ac36fe17bf504cbda1cdfa0b Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 09:34:55 +0800 Subject: [PATCH 14/33] polish code for reading. test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 6b1957ae59..dc308fd259 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -53,7 +53,7 @@ cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base s if(WITH_GPU) cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) else() -nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) endif() cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) From 684b572307ccbcbc038c175fda038ab5607c6c1f Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 11:14:42 +0800 Subject: [PATCH 15/33] polish code for reading. 
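NodeComparator now refuses to pair variables of different data types, and
the inplace inference tests use shapes large enough to clear the
MinChunkSize reuse threshold introduced earlier in this series. A hedged
sketch of the tightened comparison (simplified structs, not the real
ir::Node interface):

    // A candidate must match in dtype before its shape is even considered,
    // so an fp32 tensor can no longer reuse an int64 buffer.
    #include <iostream>
    #include <vector>

    enum class DType { kFP32, kINT64 };

    struct VarInfo {
      DType dtype;
      std::vector<long long> shape;
    };

    bool CanPair(const VarInfo& lhs, const VarInfo& rhs) {
      if (lhs.dtype != rhs.dtype) return false;  // new: dtype must match
      return lhs.shape == rhs.shape;             // shape rule, simplified
    }

    int main() {
      VarInfo a{DType::kFP32, {32, 64}};
      VarInfo b{DType::kINT64, {32, 64}};
      std::cout << CanPair(a, b) << "\n";  // 0: same shape, different dtype
      return 0;
    }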
test=develop --- .../details/memory_optimize_helper.cc | 5 +++ .../framework/inplace_op_inference_test.cc | 32 +++++++++---------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 33c2186067..6126c168cc 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -172,6 +172,11 @@ struct NodeComparator { bool operator()(ir::Node* lhs, ir::Node* rhs) const { auto* lhs_desc = FindVarDescInBlock(lhs); auto* rhs_desc = FindVarDescInBlock(rhs); + // match data type + if (lhs_desc->GetDataType() != rhs_desc->GetDataType()) { + return false; + } + // match shape auto lhs_shape = lhs_desc->GetShape(); auto rhs_shape = rhs_desc->GetShape(); if ((lhs_shape[0] == -1 && rhs_shape[0] == -1) || diff --git a/paddle/fluid/framework/inplace_op_inference_test.cc b/paddle/fluid/framework/inplace_op_inference_test.cc index 3e4d715c6f..bf9d1dcd38 100644 --- a/paddle/fluid/framework/inplace_op_inference_test.cc +++ b/paddle/fluid/framework/inplace_op_inference_test.cc @@ -179,11 +179,11 @@ TEST(InferInplace, SingleOpInplaceInToOut) { op->SetOutput("Out", {"test2_out"}); prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64}); + prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128}); prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 128, 128}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -201,11 +201,11 @@ TEST(InferInplace, SingleGradOpInplaceInToOut) { op->SetOutput(GradVarName("X"), {"test2_a", "test2_b", "test2_c"}); prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16, 1024, 1024}); prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -233,12 +233,12 @@ TEST(InferInplace, MultiOutInplaceInToOut) { prog.MutableBlock(0)->Var("o0"); prog.MutableBlock(0)->Var("y0"); prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 
1024}); + prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -267,12 +267,12 @@ TEST(InferInplace, MultiGradInplaceInToOut) { prog.MutableBlock(0)->Var("o0"); prog.MutableBlock(0)->Var("y0"); prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); From c2a5d97172ddff73fa1f634ecaf733ee89a7c63e Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 03:20:55 +0000 Subject: [PATCH 16/33] test=develop, uninstall protobuf on linux brefore install latest version of it --- paddle/scripts/paddle_build.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index bb24ada223..dbae55db56 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -88,6 +88,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.5/include/python3.5m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.5/lib/libpython3.5m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.5 uninstall -y protobuf pip3.5 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -101,6 +102,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.6 uninstall -y protobuf pip3.6 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -114,6 +116,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.7 uninstall -y protobuf pip3.7 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -128,6 +131,7 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + pip uninstall -y protobuf pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp27-cp27mu" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} @@ -135,6 +139,7 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python 
-DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + pip uninstall -y protobuf pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp35-cp35m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} @@ -142,6 +147,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.5.1/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.5.1/include/python3.5m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.5.1/lib/libpython3.so" + pip3.5 uninstall -y protobuf pip3.5 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp36-cp36m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} @@ -149,6 +155,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.6.0/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.6.0/include/python3.6m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.6.0/lib/libpython3.so" + pip3.6 uninstall -y protobuf pip3.6 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp37-cp37m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} @@ -156,6 +163,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.so" + pip3.7 uninstall -y protobuf pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi fi From 18afb77e78bae25ed1d0ac768b37ff229cecef3c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 12:12:21 +0800 Subject: [PATCH 17/33] polish code for reading. 
test=develop --- .../framework/details/memory_optimize_pass.cc | 28 ++++++++++++++++++- .../framework/details/memory_optimize_pass.h | 1 + .../test_fuse_elewise_add_act_pass.py | 4 +++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index c426059a6a..fabcd2ecd2 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -128,7 +128,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } } } - graph->ResolveHazard(var_nodes_); + // graph->ResolveHazard(var_nodes_); return graph; } @@ -324,6 +324,32 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, } } +void MemoryOptimizePass::ClearControlDepVars(ir::Graph* graph) const { + for (auto& op : graph->Nodes()) { + if (!op->IsOp()) continue; + { + auto& nodes = op->inputs; + nodes.erase( + std::remove_if(nodes.begin(), nodes.end(), + [&](ir::Node* var) { return var->IsCtrlVar(); }), + nodes.end()); + } + { + auto& nodes = op->outputs; + nodes.erase( + std::remove_if(nodes.begin(), nodes.end(), + [&](ir::Node* var) { return var->IsCtrlVar(); }), + nodes.end()); + } + } + + for (auto& node : graph->Nodes()) { + if (node->IsCtrlVar()) { + graph->RemoveNode(node); + } + } +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.h b/paddle/fluid/framework/details/memory_optimize_pass.h index 593ffc10fc..f5d188101f 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.h +++ b/paddle/fluid/framework/details/memory_optimize_pass.h @@ -48,6 +48,7 @@ class MemoryOptimizePass : public ir::Pass { void RenameVarInGraphNode(const std::string& var, const std::string& cache_var, size_t idx, ir::Graph* graph) const; + void ClearControlDepVars(ir::Graph* graph) const; void SubGraphOptimize(OpDesc* op_desc) const; // 1. scan op with subblock and collect the output/input vars. diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py index 03471a4432..c1fb53ecf5 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py @@ -121,6 +121,8 @@ class TestMNIST(TestParallelExecutorBase): regularization=fluid.regularizer.L2Decay(1e-6)) return optimizer + # NOTE(dzh): + # need to make it compatible with elewise fuse act not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence( model, feed_dict={"image": img, @@ -128,6 +130,7 @@ class TestMNIST(TestParallelExecutorBase): use_cuda=use_cuda, fuse_elewise_add_act_ops=False, memory_opt=False, + use_ir_memory_optimize=False, optimizer=_optimizer) fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence( model, @@ -136,6 +139,7 @@ class TestMNIST(TestParallelExecutorBase): use_cuda=use_cuda, fuse_elewise_add_act_ops=True, memory_opt=False, + use_ir_memory_optimize=False, optimizer=_optimizer) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): From 591ad33e32a3528b9def15ef8c707b6a2be10334 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 12:14:09 +0800 Subject: [PATCH 18/33] polish code for reading. 
test=develop --- paddle/fluid/framework/details/memory_optimize_pass.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index fabcd2ecd2..aa6641d3f2 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -46,6 +46,7 @@ namespace details { std::unique_ptr MemoryOptimizePass::ApplyImpl( std::unique_ptr graph) const { auto nodes = graph->Nodes(); + ClearControlDepVars(graph.get()); CollectSkipVarsSet(nodes); cfg_.reset(new details::ControlFlowGraph(*graph)); @@ -128,7 +129,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } } } - // graph->ResolveHazard(var_nodes_); + graph->ResolveHazard(var_nodes_); return graph; } From 576e7d71f8a39d03c0ff3453105c8547d3d6586c Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 05:22:48 +0000 Subject: [PATCH 19/33] test=develop, fix pip --- paddle/scripts/paddle_build.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index dbae55db56..5ef3a31024 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -125,6 +125,8 @@ function cmake_gen() { else if [ "$1" != "" ]; then echo "using python abi: $1" + pip uninstall -y protobuf + pip install -r ${PADDLE_ROOT}/python/requirements.txt if [ "$1" == "cp27-cp27m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} export PATH=/opt/python/cp27-cp27m/bin/:${PATH} From d386a71b65d44587892b3b0110cd1c6625f1592e Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 06:15:25 +0000 Subject: [PATCH 20/33] test=develop, install protobuf in linux --- paddle/scripts/paddle_build.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 5ef3a31024..e7078499ca 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -125,8 +125,6 @@ function cmake_gen() { else if [ "$1" != "" ]; then echo "using python abi: $1" - pip uninstall -y protobuf - pip install -r ${PADDLE_ROOT}/python/requirements.txt if [ "$1" == "cp27-cp27m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} export PATH=/opt/python/cp27-cp27m/bin/:${PATH} @@ -168,6 +166,9 @@ function cmake_gen() { pip3.7 uninstall -y protobuf pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi + else + pip uninstall -y protobuf + pip install -r ${PADDLE_ROOT}/python/requirements.txt fi fi From d94a314db55e82e7cef707d016a2796f0b6cc2bb Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 14:37:53 +0800 Subject: [PATCH 21/33] add reference. 
test=develop --- .../framework/details/memory_optimize_pass.cc | 29 +------------------ .../framework/details/memory_optimize_pass.h | 1 - 2 files changed, 1 insertion(+), 29 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index aa6641d3f2..b35b967c72 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -46,7 +46,6 @@ namespace details { std::unique_ptr MemoryOptimizePass::ApplyImpl( std::unique_ptr graph) const { auto nodes = graph->Nodes(); - ClearControlDepVars(graph.get()); CollectSkipVarsSet(nodes); cfg_.reset(new details::ControlFlowGraph(*graph)); @@ -79,7 +78,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( ir::Node* cache = pool_.FindBestFitNode(var); while (cache != nullptr && var->Name() == cache->Name()) { VLOG(3) << "The same cache variable is cascade reused. " - << var->Name() << " is re-filled to the pool after" + << cache->Name() << " is re-filled to the pool after " << "the reused op is finished. Current op can not " << "replace it again. Skip this candidate."; cache = pool_.FindNextBestFitNode(var, cache); @@ -325,32 +324,6 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, } } -void MemoryOptimizePass::ClearControlDepVars(ir::Graph* graph) const { - for (auto& op : graph->Nodes()) { - if (!op->IsOp()) continue; - { - auto& nodes = op->inputs; - nodes.erase( - std::remove_if(nodes.begin(), nodes.end(), - [&](ir::Node* var) { return var->IsCtrlVar(); }), - nodes.end()); - } - { - auto& nodes = op->outputs; - nodes.erase( - std::remove_if(nodes.begin(), nodes.end(), - [&](ir::Node* var) { return var->IsCtrlVar(); }), - nodes.end()); - } - } - - for (auto& node : graph->Nodes()) { - if (node->IsCtrlVar()) { - graph->RemoveNode(node); - } - } -} - } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.h b/paddle/fluid/framework/details/memory_optimize_pass.h index f5d188101f..593ffc10fc 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.h +++ b/paddle/fluid/framework/details/memory_optimize_pass.h @@ -48,7 +48,6 @@ class MemoryOptimizePass : public ir::Pass { void RenameVarInGraphNode(const std::string& var, const std::string& cache_var, size_t idx, ir::Graph* graph) const; - void ClearControlDepVars(ir::Graph* graph) const; void SubGraphOptimize(OpDesc* op_desc) const; // 1. scan op with subblock and collect the output/input vars. From 6cb0208ab0c8ac7e2133788b09fca797ecd78020 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 15:44:21 +0800 Subject: [PATCH 22/33] add reference. 
test=develop --- python/paddle/fluid/tests/unittests/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 534411219b..289a48aac9 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -77,6 +77,7 @@ list(REMOVE_ITEM TEST_OPS test_bilinear_interp_op) list(REMOVE_ITEM TEST_OPS test_nearest_interp_op) list(REMOVE_ITEM TEST_OPS test_imperative_resnet) list(REMOVE_ITEM TEST_OPS test_imperative_optimizer) +list(REMOVE_ITEM TEST_OPS test_ir_memory_optimize_transformer) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -107,6 +108,9 @@ py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SE py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL) set_tests_properties(test_parallel_executor_fetch_feed PROPERTIES TIMEOUT 450) py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL) +if(NOT WIN32) +py_test_modules(test_ir_memory_optimize_transformer MODULES test_ir_memory_optimize_transformer SERIAL) +endif() if(NOT APPLE) py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL) if(CMAKE_BUILD_TYPE STREQUAL "Debug") From 40402d5e6885b2f0e938a6a30c46869c53d63b6e Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 15 Feb 2019 12:39:56 +0000 Subject: [PATCH 23/33] add emb seqpool jitcode test=develop --- paddle/fluid/operators/jit/gen/CMakeLists.txt | 1 + paddle/fluid/operators/jit/gen/embseqpool.cc | 148 ++++++++++++++++++ paddle/fluid/operators/jit/gen/embseqpool.h | 81 ++++++++++ paddle/fluid/operators/jit/gen/seqpool.h | 2 +- 4 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/operators/jit/gen/embseqpool.cc create mode 100644 paddle/fluid/operators/jit/gen/embseqpool.h diff --git a/paddle/fluid/operators/jit/gen/CMakeLists.txt b/paddle/fluid/operators/jit/gen/CMakeLists.txt index efc7eb79d3..294f73d964 100644 --- a/paddle/fluid/operators/jit/gen/CMakeLists.txt +++ b/paddle/fluid/operators/jit/gen/CMakeLists.txt @@ -31,3 +31,4 @@ USE_JITKERNEL_GEN(kNCHW16CMulNC) USE_JITKERNEL_GEN(kSeqPool) USE_JITKERNEL_GEN(kHMax) USE_JITKERNEL_GEN(kHSum) +USE_JITKERNEL_GEN(kEmbSeqPool) diff --git a/paddle/fluid/operators/jit/gen/embseqpool.cc b/paddle/fluid/operators/jit/gen/embseqpool.cc new file mode 100644 index 0000000000..3f233acee9 --- /dev/null +++ b/paddle/fluid/operators/jit/gen/embseqpool.cc @@ -0,0 +1,148 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +#include "paddle/fluid/operators/jit/gen/embseqpool.h" +#include // offsetof +#include +#include "paddle/fluid/operators/jit/gen/act.h" // for exp_float_consts ones +#include "paddle/fluid/operators/jit/registry.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +void EmbSeqPoolJitCode::genCode() { + preCode(); + constexpr int block = YMM_FLOAT_BLOCK; + constexpr int max_num_regs = 8; + const int num_block = tbl_w_ / block; + const int num_groups = num_block / max_num_regs; + const size_t block_size = sizeof(float) * block; + std::vector groups(num_groups, max_num_regs); + int rest_num_regs = num_block % max_num_regs; + if (rest_num_regs > 0) { + groups.push_back(rest_num_regs); + } + + // protect param_dst + mov(reg_ptr_param_dst, param_dst); + mov(reg_idx_width_in_byte, + qword[param_attr + offsetof(emb_seq_pool_attr_t, index_width)]); + mov(reg_idx_height, + qword[param_attr + offsetof(emb_seq_pool_attr_t, index_height)]); + mov(rax, sizeof(int64_t)); + mul(reg_idx_width_in_byte); + mov(reg_idx_width_in_byte, rax); + const size_t tbl_width_in_byte = sizeof(float) * tbl_w_; + int acc_num_regs = 0; + for (int num_regs : groups) { + Label l_next_idx_w, l_next_idx_h, l_save_now; + xor_(reg_idx_w_i_in_byte, reg_idx_w_i_in_byte); + mov(reg_ptr_dst_i, reg_ptr_param_dst); + add(reg_ptr_dst_i, acc_num_regs * block_size); + add(param_tbl, acc_num_regs * block_size); + + L(l_next_idx_w); + { + // h == 0 + mov(reg_ptr_idx_i, param_idx); + add(reg_ptr_idx_i, reg_idx_w_i_in_byte); + mov(reg_idx, qword[reg_ptr_idx_i]); + mov(rax, tbl_width_in_byte); + mul(reg_idx); + mov(reg_ptr_tbl_i, rax); // reg is offset now + add(reg_ptr_tbl_i, param_tbl); // reg is ptr_i now + size_t w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ymm_t(reg_i + num_regs), ptr[reg_ptr_tbl_i + w_offset]); + w_offset += block_size; + } + add(reg_ptr_idx_i, reg_idx_width_in_byte); + + // end condition of idx h + mov(reg_idx_h_end, reg_idx_height); + mov(rax, reg_idx_width_in_byte); + mul(reg_idx_h_end); + mov(reg_idx_h_end, rax); + add(reg_idx_h_end, reg_idx_w_i_in_byte); + add(reg_idx_h_end, param_idx); + + cmp(reg_ptr_idx_i, reg_idx_h_end); + jge(l_save_now, T_NEAR); + L(l_next_idx_h); + { + mov(reg_idx, qword[reg_ptr_idx_i]); + mov(reg_ptr_tbl_i, reg_idx); + mov(rax, tbl_width_in_byte); + mul(reg_idx); + mov(reg_ptr_tbl_i, rax); + add(reg_ptr_tbl_i, param_tbl); + size_t w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ymm_t(reg_i), ptr[reg_ptr_tbl_i + w_offset]); + vaddps(ymm_t(reg_i + num_regs), ymm_t(reg_i + num_regs), + ymm_t(reg_i)); + w_offset += block_size; + } + add(reg_ptr_idx_i, reg_idx_width_in_byte); + cmp(reg_ptr_idx_i, reg_idx_h_end); + jl(l_next_idx_h, T_NEAR); + } // end of idx h + L(l_save_now); + // avg or sqrt here, if needed + w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ptr[reg_ptr_dst_i + w_offset], ymm_t(reg_i + num_regs)); + w_offset += block_size; + } + add(reg_ptr_dst_i, tbl_width_in_byte); + add(reg_idx_w_i_in_byte, sizeof(int64_t)); + cmp(reg_idx_w_i_in_byte, reg_idx_width_in_byte); + jl(l_next_idx_w, T_NEAR); + } // end of idx w + acc_num_regs += num_regs; + } // end of groups + postCode(); +} + +class EmbSeqPoolCreator : public JitCodeCreator { + public: + bool UseMe(const emb_seq_pool_attr_t& attr) const override { + return platform::MayIUse(platform::avx) && + attr.table_width % YMM_FLOAT_BLOCK == 0; + } + size_t CodeSize(const 
emb_seq_pool_attr_t& attr) const override { + return 96 + (attr.table_width / YMM_FLOAT_BLOCK) * 96 * 8; + } + std::unique_ptr CreateJitCode( + const emb_seq_pool_attr_t& attr) const override { + PADDLE_ENFORCE_GT(attr.table_height, 0); + PADDLE_ENFORCE_GT(attr.table_width, 0); + PADDLE_ENFORCE_GT(attr.index_height, 0); + PADDLE_ENFORCE_GT(attr.index_width, 0); + PADDLE_ENFORCE_GT(attr.out_width, 0); + return make_unique(attr, CodeSize(attr)); + } +}; + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle + +namespace gen = paddle::operators::jit::gen; + +REGISTER_JITKERNEL_GEN(kEmbSeqPool, gen::EmbSeqPoolCreator); diff --git a/paddle/fluid/operators/jit/gen/embseqpool.h b/paddle/fluid/operators/jit/gen/embseqpool.h new file mode 100644 index 0000000000..5afcfbdc17 --- /dev/null +++ b/paddle/fluid/operators/jit/gen/embseqpool.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#pragma once + +#include +#include "glog/logging.h" +#include "paddle/fluid/operators/jit/gen/jitcode.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +class EmbSeqPoolJitCode : public JitCode { + public: + explicit EmbSeqPoolJitCode(const emb_seq_pool_attr_t& attr, + size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), + tbl_w_(attr.table_width), + type_(attr.pool_type) { + if (type_ != SeqPoolType::kSum) { + LOG(FATAL) << "Only support sum pool yet "; + } + this->genCode(); + } + + std::string name() const override { + std::string base = "EmbSeqPoolJitCode"; + if (type_ == SeqPoolType::kSum) { + base += "_Sum"; + } else if (type_ == SeqPoolType::kAvg) { + base += "_Avg"; + } else if (type_ == SeqPoolType::kSqrt) { + base += "_Sqrt"; + } + base += ("_W" + std::to_string(tbl_w_)); + return base; + } + void genCode() override; + + private: + int tbl_w_; + SeqPoolType type_; + reg64_t param_tbl{abi_param1}; + reg64_t param_idx{abi_param2}; + reg64_t param_dst{abi_param3}; + reg64_t param_attr{abi_param4}; + + reg64_t reg_tmp{rax}; + + reg64_t reg_idx_width_in_byte{r8}; + reg64_t reg_idx_height{r9}; + + reg64_t reg_ptr_tbl_i{r10}; + reg64_t reg_idx{r10}; // could use same of reg_ptr_tbl_i + reg64_t reg_ptr_idx_i{r11}; + reg64_t reg_ptr_dst_i{r12}; + reg64_t reg_ptr_param_dst{r13}; // rdx is used in mul so protect param_dst + + reg64_t reg_idx_w_i_in_byte{r14}; + reg64_t reg_idx_h_end{r15}; +}; + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/jit/gen/seqpool.h b/paddle/fluid/operators/jit/gen/seqpool.h index 4108ee2f46..e909bc7c79 100644 --- a/paddle/fluid/operators/jit/gen/seqpool.h +++ b/paddle/fluid/operators/jit/gen/seqpool.h @@ -32,7 +32,7 @@ class SeqPoolJitCode : public JitCode { : JitCode(code_size, code_ptr), w_(attr.w), type_(attr.type) { if (!(type_ == SeqPoolType::kSum || type_ == 
SeqPoolType::kAvg || type_ == SeqPoolType::kSqrt)) { - LOG(FATAL) << "Only support sum pool yet "; + LOG(FATAL) << "Only supported pool type: sum, avg and sqrt."; } fp_h_[0] = 1.f; this->genCode(); From 75fc792d40990e6ac7755a56b5d5861f36066fb4 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 18 Feb 2019 09:33:18 +0000 Subject: [PATCH 24/33] fix when table width larger than 64 test=develop --- paddle/fluid/operators/jit/benchmark.cc | 2 +- paddle/fluid/operators/jit/gen/embseqpool.cc | 5 +++-- paddle/fluid/operators/jit/test.cc | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 9831b6ef92..96196d26a8 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -312,7 +312,7 @@ void BenchEmbSeqPoolKernel() { const T* table_data = table.data(); for (auto type : pool_types) { for (int idx_w : {1, 2, 10, 16}) { - for (int idx_h : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 9, 13, 16}) { int64_t out_w = tbl_w * idx_w; jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, type); diff --git a/paddle/fluid/operators/jit/gen/embseqpool.cc b/paddle/fluid/operators/jit/gen/embseqpool.cc index 3f233acee9..23837a3fb9 100644 --- a/paddle/fluid/operators/jit/gen/embseqpool.cc +++ b/paddle/fluid/operators/jit/gen/embseqpool.cc @@ -53,7 +53,6 @@ void EmbSeqPoolJitCode::genCode() { xor_(reg_idx_w_i_in_byte, reg_idx_w_i_in_byte); mov(reg_ptr_dst_i, reg_ptr_param_dst); add(reg_ptr_dst_i, acc_num_regs * block_size); - add(param_tbl, acc_num_regs * block_size); L(l_next_idx_w); { @@ -113,8 +112,10 @@ void EmbSeqPoolJitCode::genCode() { cmp(reg_idx_w_i_in_byte, reg_idx_width_in_byte); jl(l_next_idx_w, T_NEAR); } // end of idx w + acc_num_regs += num_regs; - } // end of groups + add(param_tbl, num_regs * block_size); // do not use acc_num_regs + } // end of groups postCode(); } diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index c35b6aef23..15e2993824 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -625,7 +625,7 @@ void TestEmbSeqPoolKernel() { const T* table_data = table.data(); for (auto type : pool_types) { for (int idx_w : {1, 2, 10, 16}) { - for (int idx_h : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 9, 13, 16}) { auto ref = jit::GetRefer>(); EXPECT_TRUE(ref != nullptr); std::vector idx(idx_h * idx_w); From 700495e11f3a7567fed5552fc7a6d8d833b3d3e1 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 18 Feb 2019 18:47:26 +0800 Subject: [PATCH 25/33] Fix FtrlOptimizer's API comment test=develop --- python/paddle/fluid/optimizer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index fbd04f1eb4..fe2b3fbbd9 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -1368,9 +1368,9 @@ class FtrlOptimizer(Optimizer): Args: learning_rate (float|Variable): global learning rate. - l1 (float): - l2 (float): - lr_power (float): + l1 (float): L1 regularization strength. + l2 (float): L2 regularization strength. + lr_power (float): Learning Rate Power. regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer. name: A optional name prefix. 
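[Review note on PATCH 23/33, 24/33 and 25/33] Two reading aids follow; both are illustrative sketches written for this review, not code from the series. First, the arithmetic the generated kEmbSeqPool kernel is expected to reproduce (sum pooling only), as a NumPy reference; the function name and the layout comments are assumptions drawn from the emb_seq_pool_attr_t fields:

    import numpy as np

    def emb_seq_pool_ref(table, idx):
        # table: (table_height, table_width) float32 embedding table
        # idx:   (index_height, index_width) int64 ids, each in [0, table_height)
        # For each idx column w, sum the selected table rows over h:
        #   out[w*table_width : (w+1)*table_width] = sum_h table[idx[h, w]]
        idx_h, idx_w = idx.shape
        out = np.empty((idx_w, table.shape[1]), dtype=table.dtype)
        for w in range(idx_w):
            out[w] = table[idx[:, w]].sum(axis=0)
        return out.reshape(-1)

PATCH 24/33 only matters once table_width exceeds 64 floats, i.e. the width no longer fits a single group of eight 8-float YMM registers: param_tbl must advance by the current group's width rather than the accumulated width, and the benchmark and test add odd index heights (9, 13) for extra coverage. Second, for the FtrlOptimizer arguments documented in PATCH 25/33, a minimal call-site sketch; the fc/loss network is a hypothetical stand-in:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    loss = fluid.layers.mean(
        fluid.layers.square_error_cost(
            input=fluid.layers.fc(input=x, size=1), label=y))

    # l1/l2 set the regularization strengths and lr_power the per-coordinate
    # learning-rate decay exponent, as documented in the patch above.
    optimizer = fluid.optimizer.FtrlOptimizer(
        learning_rate=0.1, l1=0.001, l2=0.001, lr_power=-0.5)
    optimizer.minimize(loss)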
From 78d6bb3a7a5c191722593f23cf195bda6d62634b Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 11:06:13 +0000 Subject: [PATCH 26/33] test=develop, fix patch ELF install failed --- tools/manylinux1/build_scripts/build.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index 6c551eceb4..3b78af00fd 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -107,11 +107,11 @@ curl-config --features rm -rf /usr/local/ssl # Install patchelf (latest with unreleased bug fixes) -curl -sLO http://nipy.bic.berkeley.edu/manylinux/patchelf-0.9njs2.tar.gz -check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH -tar -xzf patchelf-0.9njs2.tar.gz -(cd patchelf-0.9njs2 && ./configure && make && make install) -rm -rf patchelf-0.9njs2.tar.gz patchelf-0.9njs2 +curl -sLO https://nixos.org/releases/patchelf/patchelf-0.9/patchelf-0.9.tar.gz +check_sha256sum patchelf-0.9.tar.gz $PATCHELF_HASH +tar -xzf patchelf-0.9.tar.gz +(cd patchelf-0.9 && ./configure && make && make install) +rm -rf patchelf-0.9.tar.gz patchelf-0.9 # Install latest pypi release of auditwheel LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" $PY35_BIN/pip install auditwheel From 96b861a83690fa306f0a76df5abb91297e7502f3 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Tue, 19 Feb 2019 02:45:30 +0000 Subject: [PATCH 27/33] test=develop, change md5 for patchELF --- tools/manylinux1/build_scripts/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index 3b78af00fd..5b676c0243 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -17,7 +17,7 @@ OPENSSL_ROOT=openssl-1.1.0i OPENSSL_HASH=ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99 EPEL_RPM_HASH=e5ed9ecf22d0c4279e92075a64c757ad2b38049bcf5c16c4f2b75d5f6860dc0d DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc -PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb +PATCHELF_HASH=f2aa40a6148cb3b0ca807a1bf836b081793e55ec9e5540a5356d800132be7e0a CURL_ROOT=curl-7.49.1 CURL_HASH=eb63cec4bef692eab9db459033f409533e6d10e20942f4b060b32819e81885f1 AUTOCONF_ROOT=autoconf-2.69 From 72061b0ac0a135e40eb811278e9ad9b8cac48168 Mon Sep 17 00:00:00 2001 From: baojun <32073718+baojun-nervana@users.noreply.github.com> Date: Mon, 18 Feb 2019 18:56:45 -0800 Subject: [PATCH 28/33] Add ngraph op coverage (#15721) --- .../operators/ngraph/ops/fill_constant_op.h | 2 - .../ngraph/test_accuracy_ngraph_op.py | 34 +---------- .../ngraph/test_batch_norm_ngraph_op.py | 16 ------ .../unittests/ngraph/test_conv2d_ngraph_op.py | 55 ------------------ .../ngraph/test_elementwise_add_ngraph_op.py | 13 +---- .../ngraph/test_fill_constant_ngraph_op.py | 24 +++++--- .../unittests/ngraph/test_mean_ngraph_op.py | 7 --- .../unittests/ngraph/test_mul_ngraph_op.py | 34 +---------- .../unittests/ngraph/test_pool2d_ngraph_op.py | 56 ++++--------------- .../unittests/ngraph/test_scale_ngraph_op.py | 19 ------- .../ngraph/test_softmax_ngraph_op.py | 6 -- .../unittests/ngraph/test_top_k_ngraph_op.py | 25 --------- .../paddle/fluid/tests/unittests/op_test.py | 4 ++ 13 files changed, 35 insertions(+), 260 deletions(-) diff --git a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h index 
406a4314f8..58783bc220 100644 --- a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h +++ b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h @@ -46,8 +46,6 @@ void BuildFillConstantNode( ng_dtype = ngraph::element::i64; } else if (data_type == paddle::framework::proto::VarType::INT32) { ng_dtype = ngraph::element::i32; - } else if (data_type == paddle::framework::proto::VarType::BOOL) { - ng_dtype = ngraph::element::boolean; } else { PADDLE_THROW("unsupported data type: %s", data_type); } diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py index 84b9198dbf..5298c3c2f6 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py @@ -15,39 +15,7 @@ from __future__ import print_function import unittest -import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest - - -class TestNGRAPHAccuracyOp(OpTest): - def setUp(self): - self.op_type = "accuracy" - self.dtype = np.float32 - self.init_dtype() - n = 128 - infer = np.random.random((n, 1)).astype(self.dtype) - indices = np.random.randint(0, 2, (n, 1)) - label = np.random.randint(0, 2, (n, 1)) - self.inputs = {'Out': infer, 'Indices': indices, "Label": label} - num_correct = 0 - for rowid in range(n): - for ele in indices[rowid]: - if ele == label[rowid]: - num_correct += 1 - break - self.outputs = { - 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), - 'Correct': np.array([num_correct]).astype("int64"), - 'Total': np.array([n]).astype("int64") - } - self._cpu_only = True - - def init_dtype(self): - pass - - def test_check_output(self): - self.check_output() - +from paddle.fluid.tests.unittests.test_accuracy_op import TestAccuracyOp if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py index 511173af5e..34fb73f3cf 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py @@ -17,21 +17,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_batch_norm_op import TestBatchNormOpTraining, TestBatchNormOpInference - -class TestNGRAPHBatchNormOpTraining(TestBatchNormOpTraining): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpTraining, self).init_kernel_type() - - -class TestNGRAPHBatchNormOpInference(TestBatchNormOpInference): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpInference, self).init_kernel_type() - - -class TestNGRAPHBatchNormOpWithReluInference(TestBatchNormOpInference): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpWithReluInference, self).init_kernel_type() - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py index dbc8557b4e..ff2e865b66 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py @@ -17,60 +17,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1 - -class 
TestNGRAPH(TestConv2dOp): - def setUp(self): - super(TestNGRAPH, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPH, self).init_kernel_type() - - -class TestNGRAPHWithPad(TestWithPad): - def setUp(self): - super(TestNGRAPHWithPad, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithPad, self).init_kernel_type() - - -class TestNGRAPHWithStride(TestWithStride): - def setUp(self): - super(TestNGRAPHWithStride, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithStride, self).init_kernel_type() - - -class TestNGRAPHWithGroup(TestWithGroup): - def setUp(self): - super(TestNGRAPHWithGroup, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithGroup, self).init_kernel_type() - - -class TestNGRAPHWith1x1(TestWith1x1): - def setUp(self): - super(TestNGRAPHWith1x1, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWith1x1, self).init_kernel_type() - - -class TestNGRAPHWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def setUp(self): - super(TestNGRAPHWithInput1x1Filter1x1, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithInput1x1Filter1x1, self).init_kernel_type() - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py index 67f749bfee..3fb9af3a54 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py @@ -13,18 +13,9 @@ # limitations under the License. from __future__ import print_function -import unittest -from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp - - -class TestNGRAPHElementwiseAddOp(TestElementwiseAddOp): - def setUp(self): - super(TestNGRAPHElementwiseAddOp, self).setUp() - self._cpu_only = True - - def init_input_output(self): - super(TestNGRAPHElementwiseAddOp, self).init_input_output() +import unittest +from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp, TestElementwiseAddOp_broadcast_0 if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py index 835376ffe7..2b10b8f7a3 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py @@ -13,24 +13,34 @@ # limitations under the License. 
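# [Review note] In the fill_constant hunks below, the integer 'dtype'
# attribute values are framework::proto::VarType codes: 2 = INT32,
# 3 = INT64, 6 = FP64. PATCH 28/33 drops the BOOL branch from the nGraph
# bridge, so the rewritten tests stick to still-supported numeric types.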
from __future__ import print_function + import unittest +import numpy as np from paddle.fluid.tests.unittests.test_fill_constant_op import TestFillConstantOp1, TestFillConstantOp2, TestFillConstantOpWithSelectedRows -class TestNGRAPHFillConstantOp1(TestFillConstantOp1): +class TestNGRAPHFillConstantFP64(TestFillConstantOp1): def setUp(self): - super(TestNGRAPHFillConstantOp1, self).setUp() + super(TestNGRAPHFillConstantFP64, self).setUp() + + self.attrs = {'shape': [123, 92], 'value': 3.8, 'dtype': 6} + self.outputs = {'Out': np.full((123, 92), 3.8)} -class TestNGRAPHFillConstantOp2(TestFillConstantOp2): +class TestNGRAPHFillConstantINT32(TestFillConstantOp2): def setUp(self): - super(TestNGRAPHFillConstantOp2, self).setUp() + super(TestNGRAPHFillConstantINT32, self).setUp() + self.attrs = {'shape': [123, 92], 'dtype': 2} + self.outputs = {'Out': np.full((123, 92), 0)} -class TestNGRAPHFillConstantOpWithSelectedRows( - TestFillConstantOpWithSelectedRows): + +class TestNGRAPHFillConstantINT64(TestFillConstantOp2): def setUp(self): - super(TestFillConstantOpWithSelectedRows, self).setUp() + super(TestNGRAPHFillConstantINT64, self).setUp() + + self.attrs = {'shape': [123, 92], 'dtype': 3} + self.outputs = {'Out': np.full((123, 92), 0)} if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py index 11881ac6e5..b4894734cb 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py @@ -16,12 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_mean_op import TestMeanOp - -class TestNGRAPHMeanOp(TestMeanOp): - def setUp(self): - super(TestNGRAPHMeanOp, self).setUp() - self._cpu_only = True - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py index a916c8d450..549d03f6e9 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py @@ -15,39 +15,7 @@ from __future__ import print_function import unittest -import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest - - -class TestNGRAPHMulOp(OpTest): - def setUp(self): - self.op_type = "mul" - self.dtype = np.float32 - self.init_dtype_type() - self.inputs = { - 'X': np.random.random((2, 4)).astype(self.dtype), - 'Y': np.random.random((4, 4)).astype(self.dtype) - } - self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} - self._cpu_only = True - - def init_dtype_type(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) - +from paddle.fluid.tests.unittests.test_mul_op import TestMulOp, TestMulOp2 if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py index 96a2b72d8a..ff82e9fa1d 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py +++ 
b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py @@ -14,61 +14,25 @@ from __future__ import print_function -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 - - -class TestNGRAPHPool2D_Op(TestPool2D_Op): - def setUp(self): - super(TestNGRAPHPool2D_Op, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHPool2D_Op, self).init_test_case() - - -class TestNGRAPHCase1(TestCase1): - def setUp(self): - super(TestNGRAPHCase1, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHCase1, self).init_test_case() +import unittest - -class TestNGRAPHCase2(TestCase2): - def setUp(self): - super(TestNGRAPHCase2, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHCase2, self).init_test_case() - - -class TestNGRAPHCase3(TestCase3): - def setUp(self): - super(TestNGRAPHCase3, self).setUp() - self._cpu_only = True - - def init_pool_type(self): - super(TestNGRAPHCase3, self).init_pool_type() +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 -class TestNGRAPHCase4(TestCase4): +class TestNGRAPHCeilMode(TestCase1): def setUp(self): - super(TestNGRAPHCase4, self).setUp() - self._cpu_only = True + super(TestNGRAPHCeilMode, self).setUp() - def init_pool_type(self): - super(TestNGRAPHCase4, self).init_pool_type() + def init_ceil_mode(self): + self.ceil_mode = True -class TestNGRAPHCase5(TestCase5): +class TestNGRAPHAdaptive(TestCase1): def setUp(self): - super(TestNGRAPHCase5, self).setUp() - self._cpu_only = True + super(TestNGRAPHAdaptive, self).setUp() - def init_pool_type(self): - super(TestNGRAPHCase5, self).init_pool_type() + def init_adaptive(self): + self.adaptive = True if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py index 4da5ca4583..8beb44f55e 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py @@ -15,24 +15,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_scale_op import TestScaleOp, TestScaleOpSelectedRows - -class TestNGRAPHScaleOp(TestScaleOp): - def setUp(self): - super(TestNGRAPHScaleOp, self).setUp() - self._cpu_only = True - - def init_dtype_type(self): - pass - - -class TestNGRAPHScaleOpSelectedRows(TestScaleOpSelectedRows): - def setUp(self): - super(TestNGRAPHScaleOpSelectedRows, self).setUp() - self._cpu_only = True - - def init_dtype_type(self): - pass - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py index 81894c6e38..0cb08842df 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py @@ -16,11 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp - -class TestSoftmaxNGRAPHOp(TestSoftmaxOp): - def setUp(self): - super(TestSoftmaxNGRAPHOp, self).setUp() - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py 
b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py index fa68df1adf..d2319c4d92 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py @@ -16,30 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_top_k_op import TestTopkOp, TestTopkOp3d, TestTopkOp2, TestTopkOp3, TestTopkOp4 - -class TestNGRAPHTopkOp(TestTopkOp): - def setUp(self): - super(TestNGRAPHTopkOp, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp2(TestTopkOp2): - def setUp(self): - super(TestNGRAPHTopkOp2, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp3(TestTopkOp3): - def setUp(self): - super(TestNGRAPHTopkOp3, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp4(TestTopkOp4): - def setUp(self): - super(TestNGRAPHTopkOp4, self).setUp() - self._cpu_only = True - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 0fe836683b..8234457243 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -14,6 +14,7 @@ from __future__ import print_function +import os import unittest import numpy as np import random @@ -374,6 +375,9 @@ class OpTest(unittest.TestCase): return [] places = [fluid.CPUPlace()] cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False + use_ngraph = bool(os.getenv("FLAGS_use_ngraph", False)) + if use_ngraph: + cpu_only = True if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\ and not cpu_only: places.append(core.CUDAPlace(0)) From 796e221efc896beb6670088c14f47120d7798c4a Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 07:52:15 +0000 Subject: [PATCH 29/33] fix api arg0 test=release/1.3 --- paddle/fluid/API.spec | 6 +- paddle/fluid/pybind/pybind.cc | 109 +++++++++++++++++++++++++++++----- 2 files changed, 96 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index df961be911..8a3c062dba 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -473,11 +473,11 @@ paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_ paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]] paddle.fluid.LoDTensor.recursive_sequence_lengths recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> List[List[int]] paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CPUPlace) -> None 9. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPlace) -> None 10. 
set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPlace) -> None 11. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPlace) -> None 12. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPlace) -> None 13. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPlace) -> None 14. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPlace) -> None 15. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPlace) -> None 16. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPlace) -> None 17. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPinnedPlace) -> None 18. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPinnedPlace) -> None 19. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPinnedPlace) -> None 20. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPinnedPlace) -> None 21. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPinnedPlace) -> None 22. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPinnedPlace) -> None 23. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPinnedPlace) -> None 24. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPinnedPlace) -> None -paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None -paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None +paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, lod: List[List[int]]) -> None +paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, recursive_sequence_lengths: List[List[int]]) -> None paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core.Tensor) -> List[int] paddle.fluid.LoDTensorArray.__init__ __init__(self: paddle.fluid.core.LoDTensorArray) -> None -paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, arg0: paddle.fluid.core.LoDTensor) -> None +paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, tensor: paddle.fluid.core.LoDTensor) -> None paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> None paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a4a01ad647..a3a3872087 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -373,7 +373,13 @@ PYBIND11_MODULE(core, m) { PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()), "the provided lod info is invalid"); self.set_lod(new_lod); - }) + }, + py::arg("lod"), R"DOC( + Set LoD of the LoDTensor. + + Args: + lod (List[List[int]]): the lod to be set. 
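+
+          Examples:
+              .. code-block:: python
+
+                # illustrative sketch added in review, not from the original
+                # patch: five rows split into sequences of length 2 and 3
+                import numpy as np
+                import paddle.fluid as fluid
+
+                t = fluid.LoDTensor()
+                t.set(np.random.random([5, 30]).astype('float32'),
+                      fluid.CPUPlace())
+                t.set_lod([[0, 2, 5]])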
+ )DOC") .def("set_recursive_sequence_lengths", [](LoDTensor &self, const std::vector> &recursive_sequence_lengths) { @@ -389,7 +395,17 @@ PYBIND11_MODULE(core, m) { CheckLoD(new_offset_lod, vectorize(self.dims()).front()), "the provided recursive_sequence_lengths info is invalid"); self.set_lod(new_offset_lod); - }) + }, + py::arg("recursive_sequence_lengths"), R"DOC( + Set LoD of the LoDTensor according to recursive sequence length. + + For example, if recursive_sequence_lengths=[2, 3], meaning that + there are two sequences with length 2 and 3 respectively, the + corresponding lod would be [0, 2, 2+3], i.e, [0, 2, 5]. + + Args: + recursive_sequence_lengths (List[List[int]]): sequence lengths. + )DOC") .def("lod", [](LoDTensor &self) -> std::vector> { // output the offset-based lod info @@ -398,7 +414,13 @@ PYBIND11_MODULE(core, m) { new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); return new_lod; - }) + }, + R"DOC( + Return the LoD of the LoDTensor. + + Returns: + out (List[List[int]]): the lod of the LoDTensor. + )DOC") // Set above comments of set_lod. .def("recursive_sequence_lengths", [](LoDTensor &self) -> std::vector> { @@ -408,12 +430,25 @@ PYBIND11_MODULE(core, m) { new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); return new_lod; - }) - .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool { - // Check that the lod info is valid and match the outermost - // dimension of the LoDTensor data - return CheckLoD(self.lod(), vectorize(self.dims()).front()); - }); + }, + R"DOC( + Return the sequence length of the LoDTensor corresponding to LoD. + + Returns: + out (List[List[int]): the sequence lengths. + )DOC") + .def("has_valid_recursive_sequence_lengths", + [](LoDTensor &self) -> bool { + // Check that the lod info is valid and match the outermost + // dimension of the LoDTensor data + return CheckLoD(self.lod(), vectorize(self.dims()).front()); + }, + R"DOC( + Check whether the lod of the LoDTensor is valid. + + Returns: + out (bool): whether the lod is valid. + )DOC"); py::class_(m, "SelectedRows") .def("__init__", @@ -549,11 +584,45 @@ All parameter, weight, gradient are variables in Paddle. [](Scope &self, const std::string &name) -> Variable * { return self.Var(name); }, + py::arg("name"), + R"DOC( + Find or create variable named :code:`name` in the current scope. + + If the variable named :code:`name` does not exist in the + current scope, the variable would be created. Otherwise, + return the existing variable. + + Args: + name (str): the variable name. + + Returns: + out (core.Variable): the found or created variable. + )DOC", + py::return_value_policy::reference) + .def("find_var", &Scope::FindVar, py::arg("name"), + R"DOC( + Find variable named :code:`name` in the current scope or + its parent scope. Return None if not found. + + Args: + name (str): the variable name. + + Returns: + out (core.Variable|None): the found variable or None. + )DOC", py::return_value_policy::reference) - .def("find_var", &Scope::FindVar, py::return_value_policy::reference) .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); }, + R"DOC( + Create a new sub-scope of the current scope. + + Returns: + out (core._Scope): the created sub-scope. + )DOC", py::return_value_policy::reference) - .def("drop_kids", &Scope::DropKids); + .def("drop_kids", &Scope::DropKids, + R"DOC( + Delete all sub-scopes of the current scope. 
+ )DOC"); m.def("Scope", []() -> Scope * { @@ -561,6 +630,12 @@ All parameter, weight, gradient are variables in Paddle. ScopePool::Instance().Insert(std::unique_ptr(s)); return s; }, + R"DOC( + Create a new scope. + + Returns: + out (core._Scope): the created scope. + )DOC", py::return_value_policy::reference); //! @note: Be careful! PyBind will return std::string as an unicode, not @@ -789,11 +864,13 @@ All parameter, weight, gradient are variables in Paddle. self[i].ShareDataWith(t); self[i].set_lod(t.lod()); }) - .def("append", [](LoDTensorArray &self, const LoDTensor &t) { - self.emplace_back(); - self.back().ShareDataWith(t); - self.back().set_lod(t.lod()); - }); + .def("append", + [](LoDTensorArray &self, const LoDTensor &t) { + self.emplace_back(); + self.back().ShareDataWith(t); + self.back().set_lod(t.lod()); + }, + py::arg("tensor"), "Append a LoDensor to LoDTensorArray."); m.def("IsInplace", [](std::string op) -> bool { return operators::IsInplace(op); }); From e6ff5498494134c0e5351450da7005c6da31ab5d Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 07:56:45 +0000 Subject: [PATCH 30/33] small fix doc test=release/1.3 --- paddle/fluid/pybind/pybind.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a3a3872087..c50c38160e 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -399,9 +399,9 @@ PYBIND11_MODULE(core, m) { py::arg("recursive_sequence_lengths"), R"DOC( Set LoD of the LoDTensor according to recursive sequence length. - For example, if recursive_sequence_lengths=[2, 3], meaning that + For example, if recursive_sequence_lengths=[[2, 3]], meaning that there are two sequences with length 2 and 3 respectively, the - corresponding lod would be [0, 2, 2+3], i.e, [0, 2, 5]. + corresponding lod would be [[0, 2, 2+3]], i.e, [[0, 2, 5]]. Args: recursive_sequence_lengths (List[List[int]]): sequence lengths. From 3d0610b59bed21a79c1c93bf8083e8a083f17848 Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 08:03:59 +0000 Subject: [PATCH 31/33] fix data doc test=develop --- python/paddle/fluid/layers/io.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index b88be66906..a9b391fd53 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -56,7 +56,10 @@ def data(name, Args: name(str): The name/alias of the function - shape(list): Tuple declaring the shape. + shape(list): Tuple declaring the shape. If :code:`append_batch_size` is + True and there is no -1 inside :code:`shape`, it should be + considered as the shape of the each sample. Otherwise, it + should be considered as the shape of the batched data. append_batch_size(bool): 1. If true, it prepends -1 to the shape. For example if shape=[1], the resulting shape is [-1, 1]. From 56a5039e24ba581602185841fff970d89ab6e177 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Tue, 19 Feb 2019 11:20:21 +0800 Subject: [PATCH 32/33] Correct the doc in Python API (#15725) * Correct the comment in control_flow.py. * Correct the argument list of ops. test=develop * Update API.spec. test=develop * Skip op_callstack attr for all op apis. test=develop * Remove use_mkldnn and is_test from python api. test=develop * Remove use_mkldnn and is_test from op_proto_maker and hard-coding them in python when generating doc string. 
test=develop --- paddle/fluid/API.spec | 2 +- .../fluid/operators/controlflow/compare_op.cc | 10 +++++----- python/paddle/fluid/framework.py | 3 ++- python/paddle/fluid/layers/control_flow.py | 20 ++++++++----------- .../fluid/layers/layer_function_generator.py | 8 ++++++-- python/paddle/fluid/layers/ops.py | 4 ++-- 6 files changed, 24 insertions(+), 23 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index df961be911..a9fc840e8e 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -261,7 +261,7 @@ paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=N paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None)) -paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords='ignored', defaults=(None,)) +paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,)) diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc index 688457d4a7..5d3f9b43f8 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -51,6 +51,11 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { comment.type)); AddInput("Y", string::Sprintf("the right hand operand of %s operator", comment.type)); + AddAttr( + "axis", + "The start dimension index for broadcasting Y onto X. [default -1]") + .SetDefault(-1) + .EqualGreaterThan(-1); AddAttr("force_cpu", "Force fill output variable to cpu " "memory. Otherwise, fill output variable to the running " @@ -64,11 +69,6 @@ N-dim tensor. X and Y could be any type. The each element of the Out tensor is calculated by $%s$ )DOC", comment.equation)); - AddAttr( - "axis", - "The start dimension index for broadcasting Y onto X. [default -1]") - .SetDefault(-1) - .EqualGreaterThan(-1); } }; diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index ef304b1110..15367c724e 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -557,7 +557,8 @@ class OpProtoHolder(object): return { core.op_proto_and_checker_maker.kOpRoleAttrName(), core.op_proto_and_checker_maker.kOpRoleVarAttrName(), - core.op_proto_and_checker_maker.kOpNameScopeAttrName() + core.op_proto_and_checker_maker.kOpNameScopeAttrName(), + core.op_proto_and_checker_maker.kOpCreationCallstackAttrName() } diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 3a6753b01f..539c9675b2 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -506,9 +506,9 @@ class While(object): while loop control flow. Args: - cond (Variable): condition used to compare. + cond(Variable): condition used to compare. is_test(bool): A flag indicating whether execution is in test phase. - name (str): The name of this layer. 
+ name(str): The name of this layer. Examples: .. code-block:: python @@ -589,7 +589,8 @@ class While(object): def lod_rank_table(x, level=0): - """LoD Rank Table Operator. Given an input variable **x** and a level number + """ + LoD Rank Table Operator. Given an input variable **x** and a level number of LoD, this layer creates a LodRankTable object. A LoDRankTable object contains a list of bi-element tuples. Each tuple consists of an index and a length, both of which are int type. Refering to specified level of LoD, @@ -883,10 +884,8 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored): return cond -def equal(x, y, cond=None, **ignored): +def equal(x, y, cond=None): """ - **equal** - This layer returns the truth value of :math:`x == y` elementwise. Args: @@ -1458,7 +1457,6 @@ class DynamicRNN(object): Returns: The current timestep in the input sequence. - """ self._assert_in_rnn_block_("step_input") if not isinstance(x, Variable): @@ -1535,8 +1533,7 @@ class DynamicRNN(object): @signature_safe_contextmanager def block(self): """ - The block for user to define operators in RNN. See the class docstring - for more details. + The block for user to define operators in RNN. """ if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") @@ -1640,8 +1637,7 @@ class DynamicRNN(object): dtype(str|numpy.dtype): The data type of the initialized memory. Returns: - the memory variable. - + The memory variable. """ self._assert_in_rnn_block_('memory') self._init_zero_idx_() @@ -1740,7 +1736,7 @@ class DynamicRNN(object): def output(self, *outputs): """ - mark the RNN output variables. + Mark the RNN output variables. Args: outputs: The output variables. diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 09b1b30216..da6c241004 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -24,7 +24,7 @@ from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype from ..layer_helper import LayerHelper __all__ = [ - 'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc', + 'deprecated', 'generate_layer_fn', 'generate_activation_fn', 'autodoc', 'templatedoc' ] @@ -89,6 +89,9 @@ def _generate_doc_string_(op_proto, additional_args_lines=None): buf.write('\n') skip_attrs = OpProtoHolder.generated_op_attr_names() + # attr use_mkldnn and is_test also should not be visible to users. + skip_attrs.add("use_mkldnn") + skip_attrs.add("is_test") for each_attr in op_proto.attrs: if each_attr.name in skip_attrs: @@ -226,7 +229,7 @@ def generate_layer_fn(op_type): return func -def generate_layer_fn_noattr(op_type): +def generate_activation_fn(op_type): """Register the Python layer for an Operator without Attribute. Args: @@ -246,6 +249,7 @@ def generate_layer_fn_noattr(op_type): func.__name__ = op_type func.__doc__ = _generate_doc_string_(op_proto) + return func diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 3dcf9dc069..6b4dc4ac89 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -14,7 +14,7 @@ from __future__ import print_function import os -from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr +from .layer_function_generator import generate_layer_fn, generate_activation_fn from .. 
import core from ..framework import convert_np_dtype_to_dtype_ @@ -53,7 +53,7 @@ globals()['_elementwise_div'] = generate_layer_fn('elementwise_div') __all__ += __activations_noattr__ for _OP in set(__activations_noattr__): - globals()[_OP] = generate_layer_fn_noattr(_OP) + globals()[_OP] = generate_activation_fn(_OP) __all__ += ["uniform_random"] From df23a6f894e74975448318f34a70120e05f96a85 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Tue, 19 Feb 2019 05:05:27 +0100 Subject: [PATCH 33/33] Enable cross_entropy operator for a ngraph engine (#15674) * Enable cross_entropy operator for a ngraph engine test=develop * Update tests test=develop * Added PADDLE_ENFORCE for the batch_norm operator test=develop * Update the message about which format are supported right now test=develop --- .../fluid/operators/ngraph/ngraph_bridge.cc | 2 + paddle/fluid/operators/ngraph/ngraph_ops.h | 1 + .../operators/ngraph/ops/batch_norm_op.h | 7 + .../operators/ngraph/ops/cross_entropy_op.h | 145 +++++++++ .../ngraph/test_cross_entropy_ngraph_op.py | 275 ++++++++++++++++++ 5 files changed, 430 insertions(+) create mode 100644 paddle/fluid/operators/ngraph/ops/cross_entropy_op.h create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc index 08d72a5b39..36a2efc0ce 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.cc @@ -36,6 +36,8 @@ std::map("epsilon"); const float momentum = op_attrs.Get("momentum"); + PADDLE_ENFORCE( + data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", + "The BatchNorm operator only supports NHWC/NCHW/NC data format"); + if (data_layout == "NHWC") { x = paddle::platform::Nhwc2Nchw(x); } @@ -110,6 +114,9 @@ void BuildBatchNormGradNode( "BN grap input size needs to be 2 or 4"); PADDLE_ENFORCE_EQ(x_shape.size(), dy_shape.size(), "BN grap input and delta size needs to be equal"); + PADDLE_ENFORCE( + data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", + "The BatchNorm operator only supports NHWC/NCHW/NC data format"); if (x_shape.size() == 2) { x = std::make_shared( diff --git a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h new file mode 100644 index 0000000000..f88a2cb941 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h @@ -0,0 +1,145 @@ +/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include + +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +void BuildCrossEntropyNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); + auto label_shape = label->get_shape(); + auto x_shape = x->get_shape(); + auto label_rank = label_shape.size(); + auto x_rank = x_shape.size(); + std::shared_ptr x_2d = x, label_2d = label; + auto label_2d_shape = label_shape, x_2d_shape = x_shape; + + if (label_rank > 2) { + label_2d_shape = paddle::platform::FlattenTo2d(label_shape, label_rank - 1); + label_2d = paddle::platform::NgReshaper(label, label_2d_shape); + } + if (x_rank > 2) { + x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_rank - 1); + x_2d = paddle::platform::NgReshaper(x, x_2d_shape); + } + + auto batch_size = x_2d_shape.at(0); + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + const bool is_soft_label = op_attrs.Get("soft_label"); + + std::shared_ptr node_1_hot = label_2d; + if (!is_soft_label) { + auto label_1d = paddle::platform::NgReshaper( + label_2d, ngraph::Shape{label_2d_shape.at(0)}); + node_1_hot = std::make_shared(label_1d, x_2d_shape, 1); + } + if (x->get_element_type() != node_1_hot->get_element_type()) { + node_1_hot = std::make_shared(node_1_hot, + x->get_element_type()); + } + + auto node_log = std::make_shared(x_2d); + auto high_clip = ngraph::op::Constant::create(node_log->get_element_type(), + node_log->get_shape(), {1e20}); + auto low_clip = ngraph::op::Constant::create(node_log->get_element_type(), + node_log->get_shape(), {-1e20}); + auto node_min = std::make_shared(node_log, high_clip); + auto node_max = std::make_shared(node_min, low_clip); + auto node_mul = node_1_hot * node_log; + auto node_sum = + std::make_shared(node_mul, ngraph::AxisSet{1}); + auto node_neg = std::make_shared(node_sum); + auto xe = + paddle::platform::NgReshaper(node_neg, ngraph::Shape{batch_size, 1}); + + if (!is_soft_label) { + auto ignore_index = op_attrs.Get("ignore_index"); + auto ignore_node = ngraph::op::Constant::create( + label->get_element_type(), label_2d_shape, {ignore_index}); + auto not_equal_node = + std::make_shared(label_2d, ignore_node); + auto mask = std::make_shared(not_equal_node, + xe->get_element_type()); + xe = xe * mask; + } + + paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map); +} + +void BuildCrossEntropyGradNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + const bool is_soft_label = op_attrs.Get("soft_label"); + + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); + auto dy = paddle::platform::GetInputNode(op, "Y@GRAD", ngb_node_map); + auto x_shape = x->get_shape(); + auto rank = x_shape.size(); + + std::shared_ptr mask; + if (!is_soft_label) { + auto label_shape = label->get_shape(); + label_shape.pop_back(); + label = paddle::platform::NgReshaper(label, label_shape); + + auto ignore_index = op_attrs.Get("ignore_index"); + auto ignore_node = ngraph::op::Constant::create( + label->get_element_type(), label_shape, {ignore_index}); + auto not_equal_node = + std::make_shared(label, ignore_node); + mask = std::make_shared(not_equal_node, + 
x->get_element_type()); + mask = std::make_shared(mask, x_shape, + ngraph::AxisSet{rank - 1}); + + label = std::make_shared(label, x_shape, rank - 1); + } + + auto dy_shape = dy->get_shape(); + dy_shape.pop_back(); + auto dy_reshape = paddle::platform::NgReshaper(dy, dy_shape); + auto dy_bcast = std::make_shared( + dy_reshape, x_shape, ngraph::AxisSet{rank - 1}); + if (x->get_element_type() != label->get_element_type()) { + label = std::make_shared(label, x->get_element_type()); + } + + auto xe_grad = -label * dy_bcast / x; + + if (!is_soft_label) { + xe_grad = xe_grad * mask; + } + + paddle::platform::SetOutputNode(op, "X@GRAD", xe_grad, ngb_node_map); +} +} // namespace ngraphs +} // namespace operators +} // namespace paddle diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py new file mode 100644 index 0000000000..9a185eb97c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py @@ -0,0 +1,275 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import paddle.fluid.core as core +from paddle.fluid.tests.unittests.op_test import OpTest, randomize_probability + + +class TestCrossEntropyOp(OpTest): + """Test cross-entropy with discrete one-hot labels. + """ + + def setUp(self): + self.op_type = "cross_entropy" + self.soft_label = False + self.ignore_index = -100 + self.dtype = np.float64 + self.batch_size = 30 + self.class_num = 10 + self._cpu_only = True + + self.init_dtype_type() + self.init_attr_type() + self.init_bs_class_num() + self.init_x() + self.init_label() + self.get_cross_entropy() + + self.inputs = {"X": self.x, "Label": self.label} + self.outputs = {"Y": self.cross_entropy} + self.attrs = { + "soft_label": self.soft_label, + "ignore_index": self.ignore_index + } + + def init_x(self): + self.x = randomize_probability( + self.batch_size, self.class_num, dtype=self.dtype) + + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + for i in range(self.x.shape[0])], + dtype="float64") + + def init_attr_type(self): + pass + + def init_dtype_type(self): + pass + + def init_bs_class_num(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Y", numeric_grad_delta=0.001) + + +class TestCrossEntropyOp2(TestCrossEntropyOp): + """Test cross-entropy with vectorized soft labels. 
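+
+ Each row of label holds a full probability distribution over the
+ classes; the expected loss per row is -sum(label * log(x)) over the
+ class axis, matching get_cross_entropy below.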
+ """ + + def init_label(self): + self.label = np.random.uniform( + 0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype) + self.label /= self.label.sum(axis=1, keepdims=True) + + def get_cross_entropy(self): + self.cross_entropy = (-self.label * np.log(self.x)).sum( + axis=1, keepdims=True).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 37 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp3(TestCrossEntropyOp): + """Test cross-entropy with vectorized one-hot representation of labels. + """ + + def init_label(self): + self.label_index = np.random.randint(0, self.class_num, + (self.batch_size)) + self.label = np.zeros(self.x.shape).astype(self.dtype) + self.label[np.arange(self.batch_size), self.label_index] = 1 + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label_index[i]])] + for i in range(self.x.shape[0])]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 17 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp4(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with discrete one-hot labels. + """ + + def init_x(self): + self.shape = [10, 2, 4] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_2d = np.random.randint( + 0, self.class_num, (self.ins_num, 1), dtype="int64") + self.label = self.label_2d.reshape(self.shape + [1]) + + def get_cross_entropy(self): + cross_entropy_2d = np.asmatrix( + [[-np.log(self.X_2d[i][self.label_2d[i][0]])] + for i in range(self.X_2d.shape[0])]).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) + + def init_attr_type(self): + self.soft_label = False + + def init_dtype_type(self): + self.dtype = np.float64 + + def init_bs_class_num(self): + self.class_num = 10 + + +class TestCrossEntropyOp5(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with vectorized soft labels. 
+ """ + + def init_x(self): + self.shape = [4, 3] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_2d = np.random.uniform( + 0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype) + self.label_2d /= self.label_2d.sum(axis=1, keepdims=True) + self.label = self.label_2d.reshape(self.shape + [self.class_num]) + + def get_cross_entropy(self): + cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum( + axis=1, keepdims=True).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.class_num = 37 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp6(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with vectorized one-hot representation of labels. + """ + + def init_x(self): + self.shape = [4, 3, 2] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_index_2d = np.random.randint( + 0, self.class_num, (self.ins_num), dtype="int64") + label_2d = np.zeros(self.X_2d.shape) + label_2d[np.arange(self.ins_num), self.label_index_2d] = 1 + self.label = label_2d.reshape(self.shape + [self.class_num]).astype( + self.dtype) + + def get_cross_entropy(self): + cross_entropy_2d = np.asmatrix( + [[-np.log(self.X_2d[i][self.label_index_2d[i]])] + for i in range(self.X_2d.shape[0])]) + self.cross_entropy = np.array(cross_entropy_2d).reshape( + self.shape + [1]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.class_num = 17 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp7(TestCrossEntropyOp): + """Test cross-entropy with ignore index. + """ + + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + if self.label[i][0] != self.ignore_index else [0] + for i in range(self.x.shape[0])]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = False + self.ignore_index = 3 + + def init_dtype_type(self): + self.dtype = np.float64 + + def init_bs_class_num(self): + self.batch_size = 30 + self.class_num = 10 + + +if __name__ == "__main__": + unittest.main()