From 880836329d4c0ba0c1b05b9ce3d69dec60bf664a Mon Sep 17 00:00:00 2001 From: xuezhong Date: Wed, 30 Jan 2019 12:16:17 +0000 Subject: [PATCH 001/117] add cell clip and proj clip, fix bug for h0 --- paddle/fluid/operators/lstm_op.h | 8 +- paddle/fluid/operators/lstmp_op.cc | 21 ++- paddle/fluid/operators/lstmp_op.h | 122 ++++++++++----- .../operators/math/detail/lstm_cpu_kernel.h | 38 ++--- .../operators/math/detail/lstm_gpu_kernel.h | 30 ++-- .../fluid/operators/math/detail/lstm_kernel.h | 55 +++++-- paddle/fluid/operators/math/lstm_compute.cc | 9 +- paddle/fluid/operators/math/lstm_compute.cu | 12 +- paddle/fluid/operators/math/lstm_compute.h | 4 +- python/paddle/fluid/layers/nn.py | 44 ++++-- .../paddle/fluid/tests/unittests/op_test.py | 3 + .../fluid/tests/unittests/test_lstmp_op.py | 142 +++++++++++++++--- 12 files changed, 353 insertions(+), 135 deletions(-) diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 7d62d2d020..9f9594366c 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -151,9 +151,10 @@ class LSTMKernel : public framework::OpKernel { lstm_value.output_value = out_t.data(); lstm_value.state_value = cell_t.data(); lstm_value.state_active_value = cell_pre_act_t.data(); + T cell_clip = 0.0; math::LstmUnitFunctor::compute( - device_ctx, lstm_value, frame_size, cur_batch_size, gate_act, - cell_act, cand_act); + device_ctx, lstm_value, frame_size, cur_batch_size, cell_clip, + gate_act, cell_act, cand_act); lstm_value.prev_state_value = lstm_value.state_value; } @@ -312,9 +313,10 @@ class LSTMGradKernel : public framework::OpKernel { } int cur_batch_size = bend - bstart; + T cell_clip = 0.0; math::LstmUnitGradFunctor::compute( device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size, - gate_act, cell_act, cand_act); + cell_clip, gate_act, cell_act, cand_act); if (n > 0) { int pre_h_start = static_cast(batch_starts[n - 1]); diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index 7a62bc9f82..2728aa8a4e 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -73,12 +73,6 @@ class LSTMPOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("C0"), "Input(C0) of LSTMP operator should not be null after " "Input(H0) provided."); - auto h_dims = ctx->GetInputDim("H0"); - auto c_dims = ctx->GetInputDim("C0"); - PADDLE_ENFORCE(h_dims == c_dims, - "The dimension of Input(H0) and Input(C0) " - "should be the same."); - ctx->SetOutputDim("OrderedP0", {h_dims[0], proj_dims[1]}); } auto b_dims = ctx->GetInputDim("Bias"); @@ -180,11 +174,6 @@ class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { "This LoDTensor is obtained in the forward and used in the " "backward.") .AsIntermediate(); - AddOutput("OrderedP0", - "(Tensor) the projection of the initial hidden state " - "H0. 
This is a tensor with shape (N x P), where N is the " - "batch size and P is the hidden size.") - .AsIntermediate(); AddAttr("use_peepholes", "(bool, defalut: True) " "whether to enable diagonal/peephole connections.") @@ -193,6 +182,16 @@ class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, defalut: False) " "whether to compute reversed LSTMP.") .SetDefault(false); + AddAttr("cell_clip", + "(float, defalut: 0.0) " + "Clip for Tensor for cell state tensor when clip value is " + "greater than 0.0") + .SetDefault(0.0); + AddAttr("proj_clip", + "(float, defalut: 0.0) " + "Clip for Tensor for projection tensor when clip value is " + "greater than 0.0") + .SetDefault(0.0); AddAttr( "gate_activation", "(string, default: sigmoid)" diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 370dd04d14..8424aa8723 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once #include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/activation_op.h" @@ -21,17 +22,50 @@ limitations under the License. */ #include "paddle/fluid/operators/math/detail/activation_functions.h" #include "paddle/fluid/operators/math/lstm_compute.h" #include "paddle/fluid/operators/math/sequence2batch.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; using Tensor = framework::Tensor; +using platform::Transform; template using EigenMatrix = framework::EigenMatrix; +template +class _ClipFunctor { + public: + explicit _ClipFunctor(const T min, const T max) : min_(min), max_(max) {} + HOSTDEVICE T operator()(const T& x) const { + if (x < min_) + return min_; + else if (x > max_) + return max_; + else + return x; + } + + private: + T min_; + T max_; +}; + +template +class _ClipGradFunctor { + public: + explicit _ClipGradFunctor(const T min, const T max) : min_(min), max_(max) {} + HOSTDEVICE T operator()(const T& x, const T& y) const { + return (y > min_ && y < max_) ? 
x : 0; + } + + private: + T min_; + T max_; +}; + template inline void ReorderInitState(const DeviceContext& ctx, const framework::Tensor& src, @@ -60,6 +94,25 @@ class LSTMPKernel : public framework::OpKernel { PADDLE_THROW("unsupported activation type"); } + void Print(const Tensor& t, std::string name) const { + VLOG(1) << name << "size = " << t.numel(); + size_t size = t.numel(); + T* d = t.data(); +#ifdef PADDLE_WITH_CUDA + std::vector vec; + platform::DeviceContextPool::Instance().Get(t.place())->Wait(); + if (platform::is_gpu_place(t.place())) { + vec.resize(size); + cudaMemcpy(vec.data(), d, sizeof(T) * size, cudaMemcpyDeviceToHost); + d = vec.data(); + } +#endif + VLOG(1) << name << " data_ptr = " << static_cast(d); + for (size_t i = 0; i < size; i++) { + VLOG(1) << d[i] << ","; + } + } + void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input("Input"); auto* weight = ctx.Input("Weight"); @@ -67,9 +120,11 @@ class LSTMPKernel : public framework::OpKernel { auto* bias = ctx.Input("Bias"); auto* hidden_t0 = ctx.Input("H0"); - auto* ordered_proj0 = ctx.Output("OrderedP0"); auto* cell_t0 = ctx.Input("C0"); + auto proj_clip = static_cast(ctx.Attr("proj_clip")); + auto cell_clip = static_cast(ctx.Attr("cell_clip")); + auto* batch_gate = ctx.Output("BatchGate"); batch_gate->mutable_data(ctx.GetPlace()); auto* proj_out = ctx.Output("Projection"); @@ -110,6 +165,7 @@ class LSTMPKernel : public framework::OpKernel { } lstmp_value.prev_state_value = nullptr; Tensor ordered_c0; + Tensor ordered_h0; framework::Vector order(batch_gate->lod()[2]); @@ -169,18 +225,10 @@ class LSTMPKernel : public framework::OpKernel { // Since the batch computing for LSTMP reorders the input sequence // according to their length. The initialized hidden state also needs // to reorder. 
- - Tensor ordered_h0; - ordered_proj0->mutable_data(ctx.GetPlace()); + VLOG(1) << "qxz h0 used"; ReorderInitState(device_ctx, *hidden_t0, order, &ordered_h0, true); - blas.MatMul(ordered_h0, false, *proj_weight, false, static_cast(1.0), - ordered_proj0, static_cast(0.0)); - if (proj_act != math::detail::ActivationType::kIdentity) { - auto proj0_dev = EigenMatrix::From(*ordered_proj0); - ActCompute(cell_act, place, proj0_dev, proj0_dev); - } - blas.MatMul(*ordered_proj0, false, *weight, false, static_cast(1.0), + blas.MatMul(ordered_h0, false, *weight, false, static_cast(1.0), &gate_t, static_cast(1.0)); } @@ -189,8 +237,8 @@ class LSTMPKernel : public framework::OpKernel { lstmp_value.state_value = cell_t.data(); lstmp_value.state_active_value = cell_pre_act_t.data(); math::LstmUnitFunctor::compute( - device_ctx, lstmp_value, frame_size, cur_batch_size, gate_act, - cell_act, cand_act); + device_ctx, lstmp_value, frame_size, cur_batch_size, cell_clip, + gate_act, cell_act, cand_act); lstmp_value.prev_state_value = lstmp_value.state_value; blas.MatMul(hidden_t, false, *proj_weight, false, static_cast(1.0), &proj_t, static_cast(0.0)); @@ -198,6 +246,14 @@ class LSTMPKernel : public framework::OpKernel { auto proj_t_dev = EigenMatrix::From(proj_t); ActCompute(cell_act, place, proj_t_dev, proj_t_dev); } + if (proj_clip && proj_clip > 0.0) { + T* x_data = proj_t.data(); + int64_t numel = proj_t.numel(); + Transform trans; + trans(ctx.template device_context(), x_data, + x_data + numel, x_data, + _ClipFunctor(-1.0 * proj_clip, proj_clip)); + } } math::Batch2LoDTensorFunctor to_seq; @@ -239,6 +295,9 @@ class LSTMPGradKernel : public framework::OpKernel { auto* proj_out = ctx.Input("Projection"); auto* cell_out = ctx.Input("Cell"); + auto proj_clip = static_cast(ctx.Attr("proj_clip")); + auto cell_clip = static_cast(ctx.Attr("cell_clip")); + auto* batch_gate = ctx.Input("BatchGate"); auto* batch_cell_pre_act = ctx.Input("BatchCellPreAct"); auto* batch_hidden = ctx.Input("BatchHidden"); @@ -253,7 +312,6 @@ class LSTMPGradKernel : public framework::OpKernel { auto* bias_g = ctx.Output(framework::GradVarName("Bias")); auto* h0 = ctx.Input("H0"); - auto* ordered_proj0 = ctx.Input("OrderedP0"); auto* c0 = ctx.Input("C0"); auto* h0_g = ctx.Output(framework::GradVarName("H0")); @@ -363,6 +421,17 @@ class LSTMPGradKernel : public framework::OpKernel { Tensor cur_proj = batch_proj.Slice(bstart, bend); Tensor proj_g = batch_proj_g.Slice(bstart, bend); + + if (proj_clip && proj_clip > 0.0) { + T* dx_data = proj_g.data(); + T* x_data = cur_proj.data(); + int64_t numel = proj_g.numel(); + Transform trans; + trans(ctx.template device_context(), dx_data, + dx_data + numel, x_data, dx_data, + _ClipGradFunctor(-1.0 * proj_clip, proj_clip)); + } + if (proj_act != math::detail::ActivationType::kIdentity) { auto cur_proj_dev = EigenMatrix::From(cur_proj); auto proj_g_dev = EigenMatrix::From(proj_g); @@ -407,7 +476,7 @@ class LSTMPGradKernel : public framework::OpKernel { int cur_batch_size = bend - bstart; math::LstmUnitGradFunctor::compute( device_ctx, lstmp_value, lstmp_grad, frame_size, cur_batch_size, - gate_act, cell_act, cand_act); + cell_clip, gate_act, cell_act, cand_act); if (n > 0) { int pre_h_start = static_cast(batch_starts[n - 1]); @@ -426,31 +495,14 @@ class LSTMPGradKernel : public framework::OpKernel { ReorderInitState(device_ctx, *h0, order, &ordered_h0, true); if (weight_g) { - blas.MatMul(*ordered_proj0, true, gate_g, false, - static_cast(1.0), weight_g, static_cast(1.0)); + blas.MatMul(ordered_h0, 
true, gate_g, false, static_cast(1.0), + weight_g, static_cast(1.0)); } } if (h0 && (h0_g || proj_weight_g)) { ordered_h0_g.mutable_data(h0_g->dims(), ctx.GetPlace()); - Tensor proj0_g; - proj0_g.Resize({in_dims[0], proj_weight->dims()[1]}); - proj0_g.mutable_data(ctx.GetPlace()); blas.MatMul(gate_g, false, *weight, true, static_cast(1.0), - &proj0_g, static_cast(0.0)); - if (proj_act != math::detail::ActivationType::kIdentity) { - auto proj0_dev = EigenMatrix::From(*ordered_proj0); - auto proj0_g_dev = EigenMatrix::From(proj0_g); - ActGradCompute(cell_act, place, proj0_dev, proj0_dev, proj0_g_dev, - proj0_g_dev); - } - if (h0_g) { - blas.MatMul(proj0_g, false, *proj_weight, true, static_cast(1.0), - &ordered_h0_g, static_cast(0.0)); - } - if (proj_weight_g) { - blas.MatMul(ordered_h0, true, proj0_g, false, static_cast(1.0), - proj_weight_g, static_cast(1.0)); - } + &ordered_h0_g, static_cast(0.0)); } } } diff --git a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h index 2e3779ff08..ad79c58063 100644 --- a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h @@ -32,7 +32,8 @@ namespace detail { template void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, - int frame_size, ActivationType active_node, + int frame_size, T cell_clip, + ActivationType active_node, ActivationType active_gate, ActivationType active_state) { T r_value_in; @@ -67,7 +68,7 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, op(&r_value_in, &r_value_ig, &r_value_fg, &r_value_og, &r_prev_state, &r_state, &r_state_atv, &r_out, &r_checkI, &r_checkF, &r_checkO, - active_node, active_gate, active_state); + &cell_clip, active_node, active_gate, active_state); value_in[i] = r_value_in; value_ig[i] = r_value_ig; @@ -82,7 +83,7 @@ void naive_lstm_forward_one_sequence(Op op, LstmMetaValue value, template void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, LstmMetaGrad grad, int frame_size, - ActivationType active_node, + T cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { T r_value_in; @@ -135,7 +136,7 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, &r_grad_ig, &r_grad_fg, &r_grad_og, &r_prev_state, &r_prev_state_grad, &r_state, &r_state_grad, &r_state_atv, &r_output_grad, &r_checkI, &r_checkF, &r_checkO, &r_checkIGrad, &r_checkFGrad, &r_checkOGrad, - active_node, active_gate, active_state); + &cell_clip, active_node, active_gate, active_state); grad_in[i] = r_grad_in; grad_ig[i] = r_grad_ig; @@ -154,7 +155,8 @@ void naive_lstm_backward_one_sequence(Op op, LstmMetaValue value, template void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, - int frame_size, ActivationType active_node, + int frame_size, T cell_clip, + ActivationType active_node, ActivationType active_gate, ActivationType active_state) { #ifdef __AVX__ @@ -194,7 +196,7 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, op(&r_value_in, &r_value_ig, &r_value_fg, &r_value_og, &r_prev_state, &r_state, &r_state_atv, &r_out, &r_checkI, &r_checkF, &r_checkO, - active_node, active_gate, active_state); + &cell_clip, active_node, active_gate, active_state); value_in[i] = r_value_in; value_ig[i] = r_value_ig; @@ -210,7 +212,7 @@ void avx_lstm_forward_one_sequence(Op op, LstmMetaValue value, template void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, LstmMetaGrad grad, int frame_size, - ActivationType 
active_node, + T cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { #ifdef __AVX__ @@ -268,7 +270,7 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, &r_grad_ig, &r_grad_fg, &r_grad_og, &r_prev_state, &r_prev_state_grad, &r_state, &r_state_grad, &r_state_atv, &r_output_grad, &r_checkI, &r_checkF, &r_checkO, &r_checkIGrad, &r_checkFGrad, &r_checkOGrad, - active_node, active_gate, active_state); + &cell_clip, active_node, active_gate, active_state); grad_in[i] = r_grad_in; grad_ig[i] = r_grad_ig; @@ -292,27 +294,27 @@ void avx_lstm_backward_one_sequence(Op op, LstmMetaValue value, template void cpu_lstm_forward(Op op, LstmMetaValue value, int frame_size, - ActivationType active_node, ActivationType active_gate, - ActivationType active_state) { + T cell_clip, ActivationType active_node, + ActivationType active_gate, ActivationType active_state) { if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same::value)) { - avx_lstm_forward_one_sequence(op, value, frame_size, active_node, - active_gate, active_state); + avx_lstm_forward_one_sequence(op, value, frame_size, cell_clip, + active_node, active_gate, active_state); } else { - naive_lstm_forward_one_sequence(op, value, frame_size, active_node, - active_gate, active_state); + naive_lstm_forward_one_sequence(op, value, frame_size, cell_clip, + active_node, active_gate, active_state); } } template void cpu_lstm_backward(Op op, LstmMetaValue value, LstmMetaGrad grad, - int frame_size, ActivationType active_node, + int frame_size, T cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { if (Op::avx && !(frame_size & (8 - 1)) && (std::is_same::value)) { - avx_lstm_backward_one_sequence(op, value, grad, frame_size, active_node, - active_gate, active_state); + avx_lstm_backward_one_sequence(op, value, grad, frame_size, cell_clip, + active_node, active_gate, active_state); } else { - naive_lstm_backward_one_sequence(op, value, grad, frame_size, + naive_lstm_backward_one_sequence(op, value, grad, frame_size, cell_clip, active_node, active_gate, active_state); } } diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h index 2aecb69237..e0ca9e7f5b 100644 --- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h @@ -31,7 +31,8 @@ namespace detail { */ template __global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, - int batch_size, ActivationType active_node, + int batch_size, T cell_clip, + ActivationType active_node, ActivationType active_gate, ActivationType active_state) { const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; @@ -72,7 +73,7 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, op(&r_value_in, &r_value_ig, &r_value_fg, &r_value_og, &r_prev_state, &r_state, &r_state_atv, &r_out, &r_checkI, &r_checkF, &r_checkO, - active_node, active_gate, active_state); + &cell_clip, active_node, active_gate, active_state); value.gate_value[frame_idx] = r_value_in; value.gate_value[frame_idx + frame_size] = r_value_ig; @@ -91,7 +92,8 @@ __global__ void KeLstmForward(Op op, LstmMetaValue value, int frame_size, template __global__ void KeLstmBackward(Op op, LstmMetaValue value, LstmMetaGrad grad, int frame_size, - int batch_size, ActivationType active_node, + int batch_size, T cell_clip, + ActivationType active_node, ActivationType active_gate, ActivationType active_state) { 
const int frame_idx = blockIdx.x * blockDim.x + threadIdx.x; @@ -148,8 +150,8 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue value, op(&r_value_in, &r_value_ig, &r_value_fg, &r_value_og, &r_grad_in, &r_grad_ig, &r_grad_fg, &r_grad_og, &r_prev_state, &r_prev_state_grad, &r_state, &r_state_grad, &r_state_atv, &r_output_grad, &r_checkI, &r_checkF, - &r_checkO, &r_checkIGrad, &r_checkFGrad, &r_checkOGrad, active_node, - active_gate, active_state); + &r_checkO, &r_checkIGrad, &r_checkFGrad, &r_checkOGrad, &cell_clip, + active_node, active_gate, active_state); grad.gate_grad[frame_idx] = r_grad_in; grad.gate_grad[frame_idx + frame_size] = r_grad_ig; @@ -185,8 +187,8 @@ __global__ void KeLstmBackward(Op op, LstmMetaValue value, template void gpu_lstm_forward(const platform::DeviceContext& context, Op op, LstmMetaValue value, int frame_size, int batch_size, - ActivationType active_node, ActivationType active_gate, - ActivationType active_state) { + T cell_clip, ActivationType active_node, + ActivationType active_gate, ActivationType active_state) { dim3 threads; dim3 grid; if (batch_size == 1) { @@ -205,12 +207,12 @@ void gpu_lstm_forward(const platform::DeviceContext& context, Op op, if (batch_size == 1) { KeLstmForward<<>>( - op, value, frame_size, batch_size, active_node, active_gate, + op, value, frame_size, batch_size, cell_clip, active_node, active_gate, active_state); } else { KeLstmForward<<>>( - op, value, frame_size, batch_size, active_node, active_gate, + op, value, frame_size, batch_size, cell_clip, active_node, active_gate, active_state); } } @@ -218,7 +220,7 @@ void gpu_lstm_forward(const platform::DeviceContext& context, Op op, template void gpu_lstm_backward(const platform::DeviceContext& context, Op op, LstmMetaValue value, LstmMetaGrad grad, - int frame_size, int batch_size, + int frame_size, int batch_size, T cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { dim3 threads; @@ -239,13 +241,13 @@ void gpu_lstm_backward(const platform::DeviceContext& context, Op op, if (batch_size == 1) { KeLstmBackward<<>>( - op, value, grad, frame_size, batch_size, active_node, active_gate, - active_state); + op, value, grad, frame_size, batch_size, cell_clip, active_node, + active_gate, active_state); } else { KeLstmBackward<<>>( - op, value, grad, frame_size, batch_size, active_node, active_gate, - active_state); + op, value, grad, frame_size, batch_size, cell_clip, active_node, + active_gate, active_state); } } diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h index cbe73d6293..e1be0071f2 100644 --- a/paddle/fluid/operators/math/detail/lstm_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_kernel.h @@ -29,7 +29,7 @@ class lstm { public: HOSTDEVICE void operator()(T *value_in, T *value_ig, T *value_fg, T *value_og, T *prev_state, T *state, T *state_atv, T *output, - T *checkI, T *checkF, T *checkO, + T *checkI, T *checkF, T *checkO, T *cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { @@ -37,6 +37,14 @@ class lstm { *value_ig = activation(*value_ig + (*prev_state) * (*checkI), active_gate); *value_fg = activation(*value_fg + (*prev_state) * (*checkF), active_gate); *state = (*value_in) * (*value_ig) + (*prev_state) * (*value_fg); + if (*cell_clip > 0.0) { + if (*state < -1.0 * (*cell_clip)) { + *state = -1.0 * (*cell_clip); + } + if (*state > *cell_clip) { + *state = *cell_clip; + } + } *value_og = activation(*value_og + 
(*state) * (*checkO), active_gate); *state_atv = activation(*state, active_state); *output = (*value_og) * (*state_atv); @@ -52,7 +60,7 @@ class lstm { __m256 *value_fg, __m256 *value_og, __m256 *prev_state, __m256 *state, __m256 *state_atv, __m256 *output, __m256 *checkI, - __m256 *checkF, __m256 *checkO, + __m256 *checkF, __m256 *checkO, T *cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { @@ -65,6 +73,12 @@ class lstm { active_gate); *state = _mm256_add_ps(_mm256_mul_ps(*value_in, *value_ig), _mm256_mul_ps(*prev_state, *value_fg)); + if (*cell_clip > 0.0f) { + __m256 min = _mm256_set1_ps(0.0f - *cell_clip); + __m256 max = _mm256_set1_ps(*cell_clip); + *state = _mm256_min_ps(max, *state); + *state = _mm256_max_ps(min, *state); + } *value_og = activation( _mm256_add_ps(*value_og, _mm256_mul_ps(*state, *checkO)), active_gate); *state_atv = activation(*state, active_state); @@ -86,15 +100,21 @@ class lstm { T *prev_state, T *prev_state_grad, T *state, T *state_grad, T *state_atv, T *output_grad, T *checkI, T *checkF, T *checkO, T *checkIGrad, - T *checkFGrad, T *checkOGrad, + T *checkFGrad, T *checkOGrad, T *cell_clip, ActivationType active_node, ActivationType active_gate, ActivationType active_state) { *grad_og = activation((*output_grad) * (*state_atv), *value_og, active_gate); - *state_grad += - activation((*output_grad) * (*value_og), *state_atv, active_state) + - (*grad_og) * (*checkO); + if (*cell_clip > 0.0f) { + if (*state >= (*cell_clip) || *state <= (0.0f - (*cell_clip))) { + *state_grad = 0.0f; + } else { + *state_grad += + activation((*output_grad) * (*value_og), *state_atv, active_state) + + (*grad_og) * (*checkO); + } + } *grad_in = activation((*state_grad) * (*value_ig), *value_in, active_node); *grad_ig = activation((*state_grad) * (*value_in), *value_ig, active_gate); *grad_fg = @@ -117,15 +137,24 @@ class lstm { __m256 *prev_state, __m256 *prev_state_grad, __m256 *state, __m256 *state_grad, __m256 *state_atv, __m256 *output_grad, __m256 *checkI, __m256 *checkF, __m256 *checkO, __m256 *checkIGrad, - __m256 *checkFGrad, __m256 *checkOGrad, ActivationType active_node, - ActivationType active_gate, ActivationType active_state) { + __m256 *checkFGrad, __m256 *checkOGrad, T *cell_clip, + ActivationType active_node, ActivationType active_gate, + ActivationType active_state) { *grad_og = activation(_mm256_mul_ps(*output_grad, *state_atv), *value_og, active_gate); - *state_grad = - _mm256_add_ps(activation(_mm256_mul_ps(*output_grad, *value_og), - *state_atv, active_state), - *state_grad); - *state_grad = _mm256_add_ps(_mm256_mul_ps(*grad_og, *checkO), *state_grad); + if (*cell_clip > 0.0f) { + T *state_ = reinterpret_cast(state); + if (*state_ >= (*cell_clip) || *state_ <= (0.0f - (*cell_clip))) { + *state_grad = _mm256_set1_ps(0.0f); + } else { + *state_grad = + _mm256_add_ps(activation(_mm256_mul_ps(*output_grad, *value_og), + *state_atv, active_state), + *state_grad); + *state_grad = + _mm256_add_ps(_mm256_mul_ps(*grad_og, *checkO), *state_grad); + } + } *grad_in = activation(_mm256_mul_ps(*state_grad, *value_ig), *value_in, active_node); *grad_ig = activation(_mm256_mul_ps(*state_grad, *value_in), *value_ig, diff --git a/paddle/fluid/operators/math/lstm_compute.cc b/paddle/fluid/operators/math/lstm_compute.cc index b6882b4fd8..94bbcbb506 100644 --- a/paddle/fluid/operators/math/lstm_compute.cc +++ b/paddle/fluid/operators/math/lstm_compute.cc @@ -24,12 +24,12 @@ template struct LstmUnitFunctor { static void compute(const 
platform::CPUDeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, - const detail::ActivationType& gate_act, + T cell_clip, const detail::ActivationType& gate_act, const detail::ActivationType& cell_act, const detail::ActivationType& cand_act) { for (int b = 0; b < batch_size; b++) { detail::cpu_lstm_forward(detail::forward::lstm(), value, frame_size, - cand_act, gate_act, cell_act); + cell_clip, cand_act, gate_act, cell_act); value.gate_value += frame_size * 4; value.state_value += frame_size; value.state_active_value += frame_size; @@ -45,13 +45,14 @@ template struct LstmUnitGradFunctor { static void compute(const platform::CPUDeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, - int frame_size, int batch_size, + int frame_size, int batch_size, T cell_clip, const detail::ActivationType& gate_act, const detail::ActivationType& cell_act, const detail::ActivationType& cand_act) { for (int b = 0; b < batch_size; b++) { detail::cpu_lstm_backward(detail::backward::lstm(), value, grad, - frame_size, cand_act, gate_act, cell_act); + frame_size, cell_clip, cand_act, gate_act, + cell_act); value.gate_value += frame_size * 4; value.state_value += frame_size; diff --git a/paddle/fluid/operators/math/lstm_compute.cu b/paddle/fluid/operators/math/lstm_compute.cu index 1233000083..e7445d3d40 100644 --- a/paddle/fluid/operators/math/lstm_compute.cu +++ b/paddle/fluid/operators/math/lstm_compute.cu @@ -24,12 +24,12 @@ template struct LstmUnitFunctor { static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, int frame_size, int batch_size, - const detail::ActivationType& gate_act, + T cell_clip, const detail::ActivationType& gate_act, const detail::ActivationType& cell_act, const detail::ActivationType& cand_act) { detail::gpu_lstm_forward(context, detail::forward::lstm(), value, - frame_size, batch_size, cand_act, gate_act, - cell_act); + frame_size, batch_size, cell_clip, cand_act, + gate_act, cell_act); } }; @@ -37,13 +37,13 @@ template struct LstmUnitGradFunctor { static void compute(const platform::CUDADeviceContext& context, LstmMetaValue value, LstmMetaGrad grad, - int frame_size, int batch_size, + int frame_size, int batch_size, T cell_clip, const detail::ActivationType& gate_act, const detail::ActivationType& cell_act, const detail::ActivationType& cand_act) { detail::gpu_lstm_backward(context, detail::backward::lstm(), value, grad, - frame_size, batch_size, cand_act, gate_act, - cell_act); + frame_size, batch_size, cell_clip, cand_act, + gate_act, cell_act); } }; diff --git a/paddle/fluid/operators/math/lstm_compute.h b/paddle/fluid/operators/math/lstm_compute.h index ca2f78e6f3..80af563938 100644 --- a/paddle/fluid/operators/math/lstm_compute.h +++ b/paddle/fluid/operators/math/lstm_compute.h @@ -50,7 +50,7 @@ template class LstmUnitFunctor { public: static void compute(const DeviceContext &context, LstmMetaValue value, - int frame_size, int batch_size, + int frame_size, int batch_size, T cell_clip, const detail::ActivationType &gate_act, const detail::ActivationType &cell_act, const detail::ActivationType &cand_act); @@ -61,7 +61,7 @@ class LstmUnitGradFunctor { public: static void compute(const DeviceContext &context, LstmMetaValue value, LstmMetaGrad grad, int frame_size, int batch_size, - const detail::ActivationType &gate_act, + T cell_clip, const detail::ActivationType &gate_act, const detail::ActivationType &cell_act, const detail::ActivationType &cand_act); }; diff --git a/python/paddle/fluid/layers/nn.py 
b/python/paddle/fluid/layers/nn.py index 0e4b5aadc0..b5f6b5d443 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -659,14 +659,18 @@ def lstm(input, def dynamic_lstmp(input, size, proj_size, + h_0=None, + c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, + cell_clip=None, + proj_clip=None, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', - proj_activation='tanh', + proj_activation='identity', dtype='float32', name=None): """ @@ -736,6 +740,12 @@ def dynamic_lstmp(input, mini-batch, D is the hidden size. size(int): 4 * hidden size. proj_size(int): The size of projection output. + h_0(Variable): The initial hidden state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size and D is the projection size. + c_0(Variable): The initial cell state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size. `h_0` and `c_0` can be NULL but only at the same time. param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weight and projection weight. @@ -770,6 +780,11 @@ def dynamic_lstmp(input, the bias is initialized zero. Default: None. use_peepholes(bool): Whether to enable diagonal/peephole connections, default `True`. + cell_clip(float): If provided the cell state is clipped + by this value prior to the cell output activation. + proj_clip(float): If `num_proj > 0` and `proj_clip` is + provided, then the projected values are clipped elementwise to within + `[-proj_clip, proj_clip]`. is_reverse(bool): Whether to compute reversed LSTM, default `False`. gate_activation(str): The activation for input gate, forget gate and output gate. Choices = ["sigmoid", "tanh", "relu", @@ -781,7 +796,7 @@ def dynamic_lstmp(input, default "tanh". proj_activation(str): The activation for projection output. Choices = ["sigmoid", "tanh", "relu", "identity"], - default "tanh". + default "identity". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. 
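# ---------------------------------------------------------------------------
# Illustrative usage of the updated dynamic_lstmp API (editorial sketch, not
# part of the patch; layer names and dimensions below are made up). It shows
# the two new clip arguments, and notes that h_0, if given, must live in the
# projection space (shape N x proj_size) and that the default proj_activation
# is now 'identity'.
import paddle.fluid as fluid

emb_dim, hidden_dim, proj_dim = 256, 512, 256
words = fluid.layers.data(
    name='words', shape=[emb_dim], dtype='float32', lod_level=1)
fc_out = fluid.layers.fc(input=words, size=hidden_dim * 4)
proj_out, cell = fluid.layers.dynamic_lstmp(
    input=fc_out,
    size=hidden_dim * 4,
    proj_size=proj_dim,
    cell_clip=3.0,   # cell state clipped to [-3, 3] before the output activation
    proj_clip=3.0,   # projection clipped elementwise to [-3, 3]
    proj_activation='identity')
# ---------------------------------------------------------------------------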
@@ -831,25 +846,36 @@ def dynamic_lstmp(input, batch_hidden = helper.create_variable_for_type_inference(dtype) batch_gate = helper.create_variable_for_type_inference(dtype) batch_cell_pre_act = helper.create_variable_for_type_inference(dtype) + inputs = { + 'Input': input, + 'Weight': weight, + 'ProjWeight': proj_weight, + 'Bias': bias + } + batch_size = input.shape[0] + if h_0: + assert h_0.shape == (batch_size, proj_size), \ + 'The shape of h0 should be (batch_size, %d)' % proj_size + inputs['H0'] = h_0 + if c_0: + assert c_0.shape == (batch_size, size), \ + 'The shape of c0 should be (batch_size, %d)' % size + inputs['C0'] = c_0 helper.append_op( type='lstmp', - inputs={ - 'Input': input, - 'Weight': weight, - 'ProjWeight': proj_weight, - 'Bias': bias - }, + inputs=inputs, outputs={ 'Projection': projection, 'Cell': cell, - 'OrderedP0': ordered_proj0, 'BatchHidden': batch_hidden, 'BatchGate': batch_gate, 'BatchCellPreAct': batch_cell_pre_act }, attrs={ 'use_peepholes': use_peepholes, + 'cell_clip': cell_clip, + 'proj_clip': proj_clip, 'is_reverse': is_reverse, 'gate_activation': gate_activation, 'cell_activation': cell_activation, diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 0fe836683b..ec41c4e653 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -294,6 +294,7 @@ class OpTest(unittest.TestCase): # fetch_list = map(block.var, fetch_list) if not isinstance(fetch_list[0], fluid.framework.Variable): fetch_list = list(map(block.var, fetch_list)) + #import pdb; pdb.set_trace() outs = executor.run(program, feed=feed_map, fetch_list=fetch_list, @@ -468,8 +469,10 @@ class OpTest(unittest.TestCase): delta=numeric_grad_delta, in_place=in_place) for input_to_check in inputs_to_check ] + #import pdb; pdb.set_trace() analytic_grads = self._get_gradient(inputs_to_check, place, output_names, no_grad_set) + #import pdb; pdb.set_trace() self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check, max_relative_error, diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index 9c3ec45515..98252f86cc 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -36,12 +36,15 @@ def lstmp( w_b=None, # 1 x 4D w_c=None, # 1 x 3D is_reverse=False, + proj_clip=0.0, + cell_clip=0.0, act_gate=None, act_cell=None, act_cand=None, act_proj=None): - def _step(x, w_r, w_rh, w_c, r_pre, c_pre, act_gate, act_cell, act_cand, - act_proj): + def _step(x, w_r, w_rh, w_c, r_pre, c_pre, proj_clip, cell_clip, act_gate, + act_cell, act_cand, act_proj): + #import pdb; pdb.set_trace() g = np.dot(r_pre, w_r) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) @@ -55,6 +58,21 @@ def lstmp( g_f = act_gate(g_f + w_fc * c_pre) # 1 x D c = g_f * c_pre + g_i * act_cand(c) # 1 x D + def array_clip(a, clip): + #print('clip:{}'.format(clip)) + #print('old' + str(a)) + + size = np.prod(a.shape) + new_a = np.reshape(a, (size)) + for i in range(size): + new_a[i] = max(new_a[i], -1.0 * clip) + new_a[i] = min(new_a[i], clip) + new_a = np.reshape(new_a, a.shape) + #print('new' + str(new_a)) + return new_a + + if cell_clip > 0.0: + c = array_clip(c, cell_clip) if w_c is None: g_o = act_gate(g_o) # 1 x D else: @@ -64,6 +82,8 @@ def lstmp( # projection r = np.dot(h, w_rh) r = act_proj(r) + if proj_clip > 0.0: + r = array_clip(r, proj_clip) return r, c def _reverse(x, offset): @@ 
-87,13 +107,15 @@ def lstmp( # compute one sequence seq_len = lod[0][i] x = input[offset[i]:offset[i + 1], :] - r_pre = np.dot(h0[i], w_rh) # 1 x P - r_pre = act_proj(r_pre) + #r_pre = np.dot(h0[i], w_rh) # 1 x P + r_pre = h0[i] + #r_pre = act_proj(r_pre) c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step - r_pre, c_pre = _step(x[j], w_r, w_rh, w_c, r_pre, c_pre, act_gate, - act_cell, act_cand, act_proj) + r_pre, c_pre = _step(x[j], w_r, w_rh, w_c, r_pre, c_pre, proj_clip, + cell_clip, act_gate, act_cell, act_cand, + act_proj) projection.append(r_pre.flatten()) cell.append(c_pre.flatten()) @@ -112,24 +134,98 @@ class TestLstmpOp(LstmTest.TestLstmOp): def reset_argument(self): pass + def setUp2(self): + self.set_argument() + # projection size + self.P = 2 + + self.reset_argument() + self.op_type = 'lstmp' + self.act_proj = 'identity' + self.use_peepholes = False + self.has_initial_state = True + self.lod = [[5]] + + T = sum(self.lod[0]) + N = len(self.lod[0]) + + proj_clip = 0.5 + cell_clip = 0.0 + + #import pdb; pdb.set_trace() + x=np.array([[-0.50806344, 0.50909436], \ + [-0.50087136, 0.4904187 ], \ + [-0.48933774, 0.50408053], \ + [ 0.00896523, 0.00770854], \ + [-0.00851139,-0.01005108]]) + wx = np.array([[ 0.2932311, -0.8829277, 1.100133, 0.8197811, -0.8194872, -0.829262, 0.7708865, -0.62339246, -0.7656475, 0.4283645, -0.27164033, -0.3600223 ], \ + [-0.609142, 0.25025278, 0.15731744, -0.66051376, -0.70994514, 0.8344964, -0.00551117, -0.7072167, -0.63929003, -0.52340907, -0.8842589, 0.9531688 ]]) + x = np.dot(x, wx) + + w = np.array([[ 0.7808204, -0.7412322, -0.9458036, -0.01664658, 0.7930616, 0.10208707, 0.20036687, -0.16743736, 1.0295134, -0.3118722, 0.02241168, 0.3154219 ], \ + [-0.29026014, 0.24638331, -0.5435432, 0.87635124, -0.96091515, -0.1411362, 0.58606523, -0.38996056, -0.9003789, 0.8540163, -0.8831781, -0.28499633]]) + + w_rh = np.array([[0.15685119, 0.05694652], [-0.9641068, -1.5106804], + [0.3599193, 1.2540514]]) + w_b = np.array([[ + -0.49999997, 0.5, -0.49999997, -0.5, 0.5, 0.5, 0.49999997, + -0.49999997, 0.49999997, -0.5, 0.49999997, 0.5 + ]]) + h0 = np.array([[-1.3392334e-04, -6.8468950e-04]]) + c0 = np.array([[4.5552300e-04, 1.3302206e-03, -3.6721351e-04]]) + w_c = None + self.lod = [[5]] + #import pdb; pdb.set_trace() + r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, self.is_reverse, + proj_clip, cell_clip, ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], + ACTIVATION[self.act_proj]) + self.inputs = {'Input': (x, self.lod), 'Weight': w, 'ProjWeight': w_rh} + + self.inputs['Bias'] = w_b + + if self.has_initial_state: + self.inputs['H0'] = h0 + self.inputs['C0'] = c0 + + self.outputs = { + 'Projection': (r, self.lod), + 'Cell': (c, self.lod), + } + self.attrs = { + 'use_peepholes': self.use_peepholes, + 'is_reverse': self.is_reverse, + 'proj_clip': proj_clip, + 'cell_clip': cell_clip, + 'gate_activation': self.act_gate, + 'cell_activation': self.act_cell, + 'candidate_activation': self.act_cand, + 'proj_activation': self.act_proj + } + def setUp(self): self.set_argument() # projection size self.P = 10 + #self.D = 9 self.act_proj = self.act_cell self.reset_argument() self.op_type = 'lstmp' + #self.use_peepholes=False + #self.lod=[[7]] + #self.act_proj='identity' + #self.act_proj='tanh' T = sum(self.lod[0]) N = len(self.lod[0]) - + #np.random.seed=123 x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: - h0 = np.random.normal(size=(N, self.D)).astype('float64') + h0 = 
np.random.normal(size=(N, self.P)).astype('float64') c0 = np.random.normal(size=(N, self.D)).astype('float64') else: - h0 = np.zeros((N, self.D)).astype('float64') + h0 = np.zeros((N, self.P)).astype('float64') c0 = np.zeros((N, self.D)).astype('float64') w = np.random.normal(size=(self.P, 4 * self.D)).astype('float64') if self.use_peepholes: @@ -140,9 +236,13 @@ class TestLstmpOp(LstmTest.TestLstmOp): w_b = b[:, 0:4 * self.D] w_c = b[:, 4 * self.D:] if self.use_peepholes else None w_rh = np.random.normal(size=(self.D, self.P)).astype('float64') + proj_clip = 0.1 + cell_clip = 0.1 + #import pdb; pdb.set_trace() r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, self.is_reverse, - ACTIVATION[self.act_gate], ACTIVATION[self.act_cell], - ACTIVATION[self.act_cand], ACTIVATION[self.act_proj]) + proj_clip, cell_clip, ACTIVATION[self.act_gate], + ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], + ACTIVATION[self.act_proj]) self.inputs = {'Input': (x, self.lod), 'Weight': w, 'ProjWeight': w_rh} @@ -159,6 +259,8 @@ class TestLstmpOp(LstmTest.TestLstmOp): self.attrs = { 'use_peepholes': self.use_peepholes, 'is_reverse': self.is_reverse, + 'proj_clip': proj_clip, + 'cell_clip': cell_clip, 'gate_activation': self.act_gate, 'cell_activation': self.act_cell, 'candidate_activation': self.act_cand, @@ -171,14 +273,14 @@ class TestLstmpOp(LstmTest.TestLstmOp): def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias'], ['Projection'], - max_relative_error=1e-2) + max_relative_error=1e-2, + numeric_grad_delta=0.0000005) class TestLstmpOpHasInitial(TestLstmpOp): @@ -188,7 +290,6 @@ class TestLstmpOpHasInitial(TestLstmpOp): def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. 
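        # Editorial note (assumption, not from the patch): the zero-filled
        # BatchGate/BatchHidden/BatchCellPreAct entries below only exist so
        # that check_grad can build the op with all of its declared outputs;
        # they are never compared against the NumPy reference. The added
        # numeric_grad_delta keeps the finite-difference step small, away from
        # the kinks that cell_clip/proj_clip introduce.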
N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -196,11 +297,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0', 'C0'], ['Projection'], + numeric_grad_delta=0.0000005, max_relative_error=1e-2) def test_check_grad_ingore_bias(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -208,11 +309,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Input', 'ProjWeight', 'Weight'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('Bias')) def test_check_grad_ingore_weight(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -220,11 +321,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Input', 'ProjWeight', 'Bias'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('Weight')) def test_check_grad_ingore_proj_weight(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -232,11 +333,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Input', 'Weight', 'Bias'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('ProjWeight')) def test_check_grad_ingore_input(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -244,11 +345,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Weight', 'ProjWeight', 'Bias'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('Input')) def test_check_grad_ingore_h0(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -256,11 +357,11 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( ['Input', 'Weight', 'ProjWeight', 'Bias', 'C0'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('H0')) def test_check_grad_ingore_c0(self): N = len(self.lod[0]) - self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( @@ -268,6 +369,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): self.check_grad( 
['Input', 'Weight', 'ProjWeight', 'Bias', 'H0'], ['Projection'], max_relative_error=1e-2, + numeric_grad_delta=0.0000005, no_grad_set=set('C0')) From b0c75f1763994012b7f12a3afe0a9df42d0917c6 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Wed, 30 Jan 2019 12:30:17 +0000 Subject: [PATCH 002/117] remove debug print --- paddle/fluid/operators/lstmp_op.h | 1 - .../fluid/tests/unittests/test_lstmp_op.py | 80 ------------------- 2 files changed, 81 deletions(-) diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 8424aa8723..9cad0bfd04 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -225,7 +225,6 @@ class LSTMPKernel : public framework::OpKernel { // Since the batch computing for LSTMP reorders the input sequence // according to their length. The initialized hidden state also needs // to reorder. - VLOG(1) << "qxz h0 used"; ReorderInitState(device_ctx, *hidden_t0, order, &ordered_h0, true); blas.MatMul(ordered_h0, false, *weight, false, static_cast(1.0), diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index 98252f86cc..299a8c9695 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -44,7 +44,6 @@ def lstmp( act_proj=None): def _step(x, w_r, w_rh, w_c, r_pre, c_pre, proj_clip, cell_clip, act_gate, act_cell, act_cand, act_proj): - #import pdb; pdb.set_trace() g = np.dot(r_pre, w_r) # 1 x 4D g = g + x g = np.reshape(g, (1, g.size)) @@ -59,9 +58,6 @@ def lstmp( c = g_f * c_pre + g_i * act_cand(c) # 1 x D def array_clip(a, clip): - #print('clip:{}'.format(clip)) - #print('old' + str(a)) - size = np.prod(a.shape) new_a = np.reshape(a, (size)) for i in range(size): @@ -134,92 +130,17 @@ class TestLstmpOp(LstmTest.TestLstmOp): def reset_argument(self): pass - def setUp2(self): - self.set_argument() - # projection size - self.P = 2 - - self.reset_argument() - self.op_type = 'lstmp' - self.act_proj = 'identity' - self.use_peepholes = False - self.has_initial_state = True - self.lod = [[5]] - - T = sum(self.lod[0]) - N = len(self.lod[0]) - - proj_clip = 0.5 - cell_clip = 0.0 - - #import pdb; pdb.set_trace() - x=np.array([[-0.50806344, 0.50909436], \ - [-0.50087136, 0.4904187 ], \ - [-0.48933774, 0.50408053], \ - [ 0.00896523, 0.00770854], \ - [-0.00851139,-0.01005108]]) - wx = np.array([[ 0.2932311, -0.8829277, 1.100133, 0.8197811, -0.8194872, -0.829262, 0.7708865, -0.62339246, -0.7656475, 0.4283645, -0.27164033, -0.3600223 ], \ - [-0.609142, 0.25025278, 0.15731744, -0.66051376, -0.70994514, 0.8344964, -0.00551117, -0.7072167, -0.63929003, -0.52340907, -0.8842589, 0.9531688 ]]) - x = np.dot(x, wx) - - w = np.array([[ 0.7808204, -0.7412322, -0.9458036, -0.01664658, 0.7930616, 0.10208707, 0.20036687, -0.16743736, 1.0295134, -0.3118722, 0.02241168, 0.3154219 ], \ - [-0.29026014, 0.24638331, -0.5435432, 0.87635124, -0.96091515, -0.1411362, 0.58606523, -0.38996056, -0.9003789, 0.8540163, -0.8831781, -0.28499633]]) - - w_rh = np.array([[0.15685119, 0.05694652], [-0.9641068, -1.5106804], - [0.3599193, 1.2540514]]) - w_b = np.array([[ - -0.49999997, 0.5, -0.49999997, -0.5, 0.5, 0.5, 0.49999997, - -0.49999997, 0.49999997, -0.5, 0.49999997, 0.5 - ]]) - h0 = np.array([[-1.3392334e-04, -6.8468950e-04]]) - c0 = np.array([[4.5552300e-04, 1.3302206e-03, -3.6721351e-04]]) - w_c = None - self.lod = [[5]] - #import pdb; pdb.set_trace() - r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, 
self.is_reverse, - proj_clip, cell_clip, ACTIVATION[self.act_gate], - ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], - ACTIVATION[self.act_proj]) - self.inputs = {'Input': (x, self.lod), 'Weight': w, 'ProjWeight': w_rh} - - self.inputs['Bias'] = w_b - - if self.has_initial_state: - self.inputs['H0'] = h0 - self.inputs['C0'] = c0 - - self.outputs = { - 'Projection': (r, self.lod), - 'Cell': (c, self.lod), - } - self.attrs = { - 'use_peepholes': self.use_peepholes, - 'is_reverse': self.is_reverse, - 'proj_clip': proj_clip, - 'cell_clip': cell_clip, - 'gate_activation': self.act_gate, - 'cell_activation': self.act_cell, - 'candidate_activation': self.act_cand, - 'proj_activation': self.act_proj - } - def setUp(self): self.set_argument() # projection size self.P = 10 - #self.D = 9 self.act_proj = self.act_cell self.reset_argument() self.op_type = 'lstmp' - #self.use_peepholes=False - #self.lod=[[7]] - #self.act_proj='identity' - #self.act_proj='tanh' T = sum(self.lod[0]) N = len(self.lod[0]) - #np.random.seed=123 x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: h0 = np.random.normal(size=(N, self.P)).astype('float64') @@ -238,7 +159,6 @@ class TestLstmpOp(LstmTest.TestLstmOp): w_rh = np.random.normal(size=(self.D, self.P)).astype('float64') proj_clip = 0.1 cell_clip = 0.1 - #import pdb; pdb.set_trace() r, c = lstmp(x, self.lod, h0, c0, w, w_rh, w_b, w_c, self.is_reverse, proj_clip, cell_clip, ACTIVATION[self.act_gate], ACTIVATION[self.act_cell], ACTIVATION[self.act_cand], From d600d0ac703caf34e5ca9e2b0bb764a0068cf73b Mon Sep 17 00:00:00 2001 From: xuezhong Date: Wed, 30 Jan 2019 12:33:58 +0000 Subject: [PATCH 003/117] remove debug pdb --- python/paddle/fluid/tests/unittests/op_test.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index ec41c4e653..a67a0e4073 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -469,10 +469,8 @@ class OpTest(unittest.TestCase): delta=numeric_grad_delta, in_place=in_place) for input_to_check in inputs_to_check ] - #import pdb; pdb.set_trace() analytic_grads = self._get_gradient(inputs_to_check, place, output_names, no_grad_set) - #import pdb; pdb.set_trace() self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check, max_relative_error, From 74da01191e52b14b45e31c00aaf45637ed1abc5a Mon Sep 17 00:00:00 2001 From: xuezhong Date: Wed, 30 Jan 2019 12:38:48 +0000 Subject: [PATCH 004/117] refine code --- python/paddle/fluid/tests/unittests/test_lstmp_op.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index 299a8c9695..0645cfedb8 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -64,7 +64,6 @@ def lstmp( new_a[i] = max(new_a[i], -1.0 * clip) new_a[i] = min(new_a[i], clip) new_a = np.reshape(new_a, a.shape) - #print('new' + str(new_a)) return new_a if cell_clip > 0.0: @@ -103,9 +102,7 @@ def lstmp( # compute one sequence seq_len = lod[0][i] x = input[offset[i]:offset[i + 1], :] - #r_pre = np.dot(h0[i], w_rh) # 1 x P r_pre = h0[i] - #r_pre = act_proj(r_pre) c_pre = c0[i] # 1 x D for j in range(seq_len): # compute one step From 2857dac260bc0c858d1338a76cff1018ea67a877 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Thu, 31 Jan 2019 13:21:17 +0000 Subject: [PATCH 
005/117] add assert for clip and remove print --- paddle/fluid/operators/lstmp_op.h | 19 ------------------- python/paddle/fluid/layers/nn.py | 5 +++++ 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 9cad0bfd04..94040c5977 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -94,25 +94,6 @@ class LSTMPKernel : public framework::OpKernel { PADDLE_THROW("unsupported activation type"); } - void Print(const Tensor& t, std::string name) const { - VLOG(1) << name << "size = " << t.numel(); - size_t size = t.numel(); - T* d = t.data(); -#ifdef PADDLE_WITH_CUDA - std::vector vec; - platform::DeviceContextPool::Instance().Get(t.place())->Wait(); - if (platform::is_gpu_place(t.place())) { - vec.resize(size); - cudaMemcpy(vec.data(), d, sizeof(T) * size, cudaMemcpyDeviceToHost); - d = vec.data(); - } -#endif - VLOG(1) << name << " data_ptr = " << static_cast(d); - for (size_t i = 0; i < size; i++) { - VLOG(1) << d[i] << ","; - } - } - void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input("Input"); auto* weight = ctx.Input("Weight"); diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b5f6b5d443..c56fd1c917 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -862,6 +862,11 @@ def dynamic_lstmp(input, 'The shape of c0 should be (batch_size, %d)' % size inputs['C0'] = c_0 + if cell_clip: + assert cell_clip >= 0, "cell_clip should not be negtive." + if proj_clip: + assert proj_clip >= 0, "proj_clip should not be negtive." + helper.append_op( type='lstmp', inputs=inputs, From c5c6bd7b02db7cfd2c55a5e0a9c5e743906419a1 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Thu, 31 Jan 2019 13:42:35 +0000 Subject: [PATCH 006/117] refine code test=develop --- python/paddle/fluid/tests/unittests/op_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index a67a0e4073..0fe836683b 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -294,7 +294,6 @@ class OpTest(unittest.TestCase): # fetch_list = map(block.var, fetch_list) if not isinstance(fetch_list[0], fluid.framework.Variable): fetch_list = list(map(block.var, fetch_list)) - #import pdb; pdb.set_trace() outs = executor.run(program, feed=feed_map, fetch_list=fetch_list, From 20e579ef2ad9e3afe184ae05ea31ca4b575f810f Mon Sep 17 00:00:00 2001 From: xuezhong Date: Fri, 1 Feb 2019 03:50:46 +0000 Subject: [PATCH 007/117] add initial_accumulator_value for adagrad test=develop --- python/paddle/fluid/optimizer.py | 14 +++++++++++++- .../paddle/fluid/tests/unittests/test_optimizer.py | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index e0e781a322..ce5e5c4f37 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -662,7 +662,8 @@ class AdagradOptimizer(Optimizer): learning_rate, epsilon=1.0e-6, regularization=None, - name=None): + name=None, + initial_accumulator_value=0.1): assert learning_rate is not None assert epsilon is not None super(AdagradOptimizer, self).__init__( @@ -671,6 +672,7 @@ class AdagradOptimizer(Optimizer): name=name) self.type = "adagrad" self._epsilon = epsilon + self.initial_accumulator_value = initial_accumulator_value def 
_create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) @@ -683,6 +685,16 @@ class AdagradOptimizer(Optimizer): moment_acc = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) + startup_block = framework.default_startup_program().global_block() + startup_block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [moment_acc]}, + attrs={ + 'dtype': moment_acc.dtype, + 'value': self.initial_accumulator_value, + 'shape': moment_acc.shape, + }) # Create the adagrad optimizer op adagrad_op = block.append_op( diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 34c9b7e006..95ddc135b3 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -274,7 +274,7 @@ class TestAdagradOptimizer(unittest.TestCase): # Check init_program init_ops = init_program.global_block().ops - self.assertEqual(len(init_ops), 2) + self.assertEqual(len(init_ops), 3) self.assertEqual(init_ops[0].type, "fill_constant") self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) self.assertEqual(init_ops[1].type, "fill_constant") From e261b60f97e31c60a775df02a9f138e47f8d67ae Mon Sep 17 00:00:00 2001 From: xuezhong Date: Fri, 1 Feb 2019 07:27:59 +0000 Subject: [PATCH 008/117] change api spec for adagrad optimizer test=develop --- paddle/fluid/API.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index f50a38842a..03478a932c 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -427,7 +427,7 @@ paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learnin paddle.fluid.optimizer.MomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) paddle.fluid.optimizer.MomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) -paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name'], varargs=None, keywords=None, defaults=(1e-06, None, None)) +paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.1)) paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) From 18bff5298dc3ff90a53378bd1c45740a8ab20d79 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 1 Feb 2019 10:58:47 +0000 Subject: [PATCH 009/117] extract fused_emb_seq_pool forward function test=develop --- .../fused/fused_embedding_seq_pool_op.h | 58 ++++++++++++------- 1 file changed, 37 insertions(+), 21 
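# ---------------------------------------------------------------------------
# Editorial sketch (not part of the patch): a NumPy reference for the sum
# pooling done by the emb_seqpool helper extracted in the diff below. Shapes
# are assumptions: `table` is (table_height, table_width) and `idx` holds the
# ids of one sequence as (idx_height, idx_width); for every idx column the
# looked-up embedding rows are summed, and the per-column results are
# concatenated into one output row of width idx_width * table_width.
import numpy as np

def emb_seqpool_ref(table, idx):
    idx_height, idx_width = idx.shape
    table_width = table.shape[1]
    out = np.zeros((idx_width, table_width), dtype=table.dtype)
    for w in range(idx_width):
        for h in range(idx_height):
            out[w] += table[idx[h, w]]  # sum pool over the sequence
    return out.reshape(idx_width * table_width)
# ---------------------------------------------------------------------------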
deletions(-) diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 758432fd9e..744e83541d 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -31,38 +31,54 @@ using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; using DDim = framework::DDim; +template +void emb_seqpool(const framework::ExecutionContext &context, const T *table, + const int64_t *idx, T *out, int64_t table_height, + int64_t table_width, int64_t idx_height, int64_t idx_width, + int64_t out_width) { // pool type == sum + PADDLE_ENFORCE_EQ(table_width * idx_width, out_width); + + auto check_idx_value_valid = [&](int i) { + PADDLE_ENFORCE_LT(idx[i], table_height, "idx value: %d, i: %d", idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + auto blas = math::GetBlas(context); + + for (int w = 0; w != idx_width; ++w) { + check_idx_value_valid(w); + blas.VCOPY(table_width, table + idx[w] * table_width, + out + w * table_width); + } + + for (int h = 1; h < idx_height; ++h) { + for (int w = 0; w < idx_width; ++w) { + int i = h * idx_width + w; + check_idx_value_valid(i); + blas.AXPY(table_width, static_cast(1), table + idx[i] * table_width, + out + w * table_width); + } + } +} + template struct EmbeddingVSumFunctor { void operator()(const framework::ExecutionContext &context, const LoDTensor *table_t, const LoDTensor *ids_t, LoDTensor *output_t) { auto *table = table_t->data(); - int64_t row_number = table_t->dims()[0]; - int64_t row_width = table_t->dims()[1]; - int64_t last_dim = output_t->dims()[1]; + int64_t table_height = table_t->dims()[0]; + int64_t table_width = table_t->dims()[1]; + int64_t out_width = output_t->dims()[1]; const int64_t *ids = ids_t->data(); auto ids_lod = ids_t->lod()[0]; - int64_t ids_count = ids_t->numel() / ids_lod.back(); - + int64_t idx_width = ids_t->numel() / ids_lod.back(); auto *output = output_t->mutable_data(context.GetPlace()); - auto blas = math::GetBlas(context); + PADDLE_ENFORCE_LE(table_width * idx_width, out_width); for (int64_t i = 0; i != ids_lod.size() - 1; ++i) { - size_t begin = ids_lod[i] * ids_count; - for (int64_t j = 0; j != ids_count; ++j) { - PADDLE_ENFORCE_LT(ids[begin], row_number); - PADDLE_ENFORCE_GE(ids[begin], 0, "ids %d", i); - blas.VCOPY(row_width, table + ids[begin + j] * row_width, - output + i * last_dim + j * row_width); - } - - for (int64_t r = (ids_lod[i] + 1) * ids_count; - r < ids_lod[i + 1] * ids_count; ++r) { - PADDLE_ENFORCE_LT(ids[r], row_number); - PADDLE_ENFORCE_GE(ids[r], 0, "ids %d", i); - blas.AXPY(row_width, 1., table + ids[r] * row_width, - output + i * last_dim + (r % ids_count) * row_width); - } + emb_seqpool(context, table, ids + ids_lod[i] * idx_width, + output + i * out_width, table_height, table_width, + ids_lod[i + 1] - ids_lod[i], idx_width, out_width); } } }; From fb9a6a2bc6cbc88893544198ca1d9242523e3a06 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Mon, 11 Feb 2019 10:17:02 +0000 Subject: [PATCH 010/117] pass test for lstm op test=develop --- paddle/fluid/operators/math/detail/lstm_kernel.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h index e1be0071f2..8149686c97 100644 --- a/paddle/fluid/operators/math/detail/lstm_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_kernel.h @@ -37,6 +37,7 @@ 
class lstm { *value_ig = activation(*value_ig + (*prev_state) * (*checkI), active_gate); *value_fg = activation(*value_fg + (*prev_state) * (*checkF), active_gate); *state = (*value_in) * (*value_ig) + (*prev_state) * (*value_fg); + if (*cell_clip > 0.0) { if (*state < -1.0 * (*cell_clip)) { *state = -1.0 * (*cell_clip); @@ -73,6 +74,7 @@ class lstm { active_gate); *state = _mm256_add_ps(_mm256_mul_ps(*value_in, *value_ig), _mm256_mul_ps(*prev_state, *value_fg)); + if (*cell_clip > 0.0f) { __m256 min = _mm256_set1_ps(0.0f - *cell_clip); __m256 max = _mm256_set1_ps(*cell_clip); @@ -114,7 +116,12 @@ class lstm { activation((*output_grad) * (*value_og), *state_atv, active_state) + (*grad_og) * (*checkO); } + } else { + *state_grad += + activation((*output_grad) * (*value_og), *state_atv, active_state) + + (*grad_og) * (*checkO); } + *grad_in = activation((*state_grad) * (*value_ig), *value_in, active_node); *grad_ig = activation((*state_grad) * (*value_in), *value_ig, active_gate); *grad_fg = From c0b8fd7ca00cb8b39be548bf7f1bdfffbc02c6f1 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Mon, 11 Feb 2019 14:16:22 +0000 Subject: [PATCH 011/117] update lstmp op api spec test=develop --- paddle/fluid/API.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index f50a38842a..ecfcab9479 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -71,7 +71,7 @@ paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'v paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None)) paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')) paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None)) -paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None)) +paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'cell_clip', 'proj_clip', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, None, None, False, 'sigmoid', 'tanh', 'tanh', 'identity', 'float32', None)) paddle.fluid.layers.dynamic_gru ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False)) paddle.fluid.layers.gru_unit ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False)) 
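The updated ArgSpec above pairs with the non-negative checks added to dynamic_lstmp earlier in this series. A minimal usage sketch of the new signature follows; the sizes and tensor names below are illustrative only and are not taken from the patch:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[32], dtype='float32', lod_level=1)
    fc = fluid.layers.fc(input=x, size=4 * 64)        # 4 * hidden_size gate input
    proj, cell = fluid.layers.dynamic_lstmp(
        input=fc,
        size=4 * 64,                                  # 4 * hidden_size
        proj_size=32,
        cell_clip=3.0,    # clip cell state into [-3.0, 3.0]; 0.0 keeps clipping off
        proj_clip=3.0,    # clip the recurrent projection the same way
        proj_activation='identity')                   # new default per the ArgSpec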
paddle.fluid.layers.linear_chain_crf ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,)) From 03f091a9d3c0614561e85ed7b686fb3e0a0253e6 Mon Sep 17 00:00:00 2001 From: chengduozh Date: Tue, 12 Feb 2019 17:32:06 +0800 Subject: [PATCH 012/117] fix api doc test=develop --- python/paddle/fluid/layers/nn.py | 49 ++++++++++++++++++++++++---- python/paddle/fluid/layers/tensor.py | 6 +++- 2 files changed, 47 insertions(+), 8 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 0e4b5aadc0..ea043b0eba 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5935,13 +5935,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): than :attr:`shape`. act (str): The non-linear activation to be applied to the reshaped tensor variable. - inplace(bool): Must use :attr:`False` if :attr:`x` is used in multiple - operators. If this flag is set :attr:`True`, reuse input - :attr:`x` to reshape, which will change the shape of - tensor variable :attr:`x` and might cause errors when - :attr:`x` is used in multiple operators. If :attr:`False`, - preserve the shape :attr:`x` and create a new output tensor - variable whose data is copied from input x but reshaped. + inplace(bool): If ``inplace`` is `True`, the input and output of ``layers.reshape`` + are the same variable, otherwise, the input and output of + ``layers.reshape`` are different variables. Note that if :attr:`x` + is more than one layers' input, ``inplace`` must be :attr:`False`. name (str): The name of this layer. It is optional. Returns: @@ -8334,6 +8331,44 @@ def stack(x, axis=0): If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x[0])+1`. If :code:`axis` is None, it would be replaced with 0. + .. code-block:: text + + Case 1: + Input: + x[0].data = [ [1.0 , 2.0 ] ] + x[0].dims = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[1].dims = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + x[2].dims = [1, 2] + + Attrs: + axis = 0 + + Output: + Out.data =[ [ [1.0, 2.0] ], + [ [3.0, 4.0] ], + [ [5.0, 6.0] ] ] + Out.dims = [3, 1, 2] + + Case 2: + Given + x[0].data = [ [1.0 , 2.0 ] ] + x[0].dims = [1, 2] + x[1].data = [ [3.0 , 4.0 ] ] + x[1].dims = [1, 2] + x[2].data = [ [5.0 , 6.0 ] ] + x[2].dims = [1, 2] + + Attrs: + axis = 1 or axis = -2 + + Output: + Out.data =[ [ [1.0, 2.0] + [3.0, 4.0] + [5.0, 6.0] ] ] + Out.dims = [1, 3, 2] + Args: x (Variable|list(Variable)|tuple(Variable)): Input variables. axis (int|None): The axis along which all inputs are stacked. diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 2153ca254f..af747c3cec 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -567,7 +567,7 @@ def ones(shape, dtype, force_cpu=False): It also sets *stop_gradient* to True. Args: - shape(tuple|list|None): Shape of output tensor + shape(tuple|list): Shape of output tensor dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor Returns: @@ -578,6 +578,10 @@ def ones(shape, dtype, force_cpu=False): data = fluid.layers.ones(shape=[1], dtype='int64') """ + assert isinstance(shape, list) or isinstance( + shape, tuple), "The shape's type should be list or tuple." + assert reduce(lambda x, y: x * y, + shape) > 0, "The shape is invalid: %s." 
% (str(shape)) return fill_constant(value=1.0, **locals()) From ba223e956609fac86e30efaa423dd324e7bc3ecc Mon Sep 17 00:00:00 2001 From: chengduozh Date: Wed, 13 Feb 2019 15:05:43 +0800 Subject: [PATCH 013/117] doc refine test=develop --- python/paddle/fluid/layers/nn.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index ea043b0eba..f4c4fc3b65 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -8331,6 +8331,8 @@ def stack(x, axis=0): If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x[0])+1`. If :code:`axis` is None, it would be replaced with 0. + For Example: + .. code-block:: text Case 1: From 11afbe0f538f873b77647e280ee8de5ae35ca790 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 15:27:06 +0800 Subject: [PATCH 014/117] add details. test=develop --- .../framework/details/memory_optimize_pass.cc | 85 ++++++++++--------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 41e4a834df..1574d78440 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -69,55 +69,58 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } for (auto& var : op->outputs) { - if (!NodeCanReused(var) || cfg_->Use(op).count(var->Name()) == 0 || - skip_set_.count(var->Name())) + if (skip_set_.count(var->Name())) { + VLOG(3) << "Skip set contains variable of " << var->Name() + << "disable reuse on it. skipped"; continue; - ir::Node* cache = pool_.FindBestFitNode(var); - - if (var->Name() == FLAGS_memory_optimize_debug) { - VLOG(3) << "start match var " << DebugString(var) << " of op " - << op->Name(); - VLOG(3) << pool_.ToString(); - VLOG(3) << "matched in pool : " - << ((cache == nullptr) ? "False" : "True"); } + if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { + ir::Node* cache = pool_.FindBestFitNode(var); + if (var->Name() == FLAGS_memory_optimize_debug) { + VLOG(3) << "start match var " << DebugString(var) << " of op " + << op->Name(); + VLOG(3) << pool_.ToString(); + VLOG(3) << "matched in pool : " + << ((cache == nullptr) ? "False" : "True"); + } - if (cache == nullptr) continue; - if (var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." << var->Name() - << " is re-filled to the pool after" - << "the reused op is finished. Current op can not " - << "replace it again. Skip this candidate."; - continue; - - int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); - VLOG(3) << string::Sprintf( - "!!! %s, %s => %s, cache idx %d, pool size %d", - std::to_string(reuse_id++), DebugString(var), DebugString(cache), - node_idx_in_pool, static_cast(pool_.size())); - - // update CFG Graph on the fly. - // reused var maybe re-fill into the pool - cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); - // NOTE(dzhwinter): we need to both update the ProgramDesc - // and IR Graph. because op_desc/var_desc is used in CreateOp, - // CreateVar when running happens. But IR Graph - // define the dependence relationship between nodes. - RenameVarInGraphDesc(var->Name(), cache->Name(), idx); - RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); + if (cache != nullptr) { + if (var->Name() == cache->Name()) { + VLOG(3) << "The same cache variable is cascade reused." 
+ << var->Name() << " is re-filled to the pool after" + << "the reused op is finished. Current op can not " + << "replace it again. Skip this candidate."; + continue; + } - pool_.Erase(cache); - } + int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); + VLOG(3) << string::Sprintf( + "!!! %s, %s => %s, cache idx %d, pool size %d", + std::to_string(reuse_id++), DebugString(var), DebugString(cache), + node_idx_in_pool, static_cast(pool_.size())); + // NOTE(dzhwinter): update the ProgramDesc/IR Graph + // and the CFG Graph on the fly. + // + // IR Graph define the dependence relationship between nodes. + // + // ProgramDesc defines the input/output vars. Its used in + // CreateOp, CreateVar when running happens. + // + // CFG Graph store the liveness information, when reuse happens + // we also need to update the variable liveness. + cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); + RenameVarInGraphDesc(var->Name(), cache->Name(), idx); + RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); - // fill the pool - std::unordered_set unlived_vars; - for (auto var : cfg_->LiveIn(op)) { - if (cfg_->LiveOut(op).count(var) == 0) { - unlived_vars.emplace(var); + pool_.Erase(cache); } } - for (auto var : unlived_vars) { + } + // fill the pool + for (auto var : cfg_->LiveIn(op)) { + if (cfg_->LiveOut(op).count(var) == 0) { ir::Node* var_node = cfg_->GetNodeByName(var, op); + if (var_node == nullptr) continue; if (NodeCanReused(var_node) && !pool_.Has(var_node)) { pool_.Insert(var_node); } From 6d6ddcfe15f6d6d2be156b469cbb284ce9382646 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 19:39:32 +0800 Subject: [PATCH 015/117] add details. test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 7 ++- .../details/memory_optimize_helper.cc | 52 ++++++++++++++++++- .../details/memory_optimize_helper.h | 1 + .../framework/details/memory_optimize_pass.cc | 15 +++--- 4 files changed, 64 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index e88084424b..5e8ffa4f51 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -50,7 +50,12 @@ cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_ cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper) +if(WITH_GPU) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +else() +nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) +endif() + cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) cc_library(inplace_op_pass SRCS inplace_op_pass.cc DEPS memory_optimize_pass op_info) cc_library(modify_op_lock_and_record_event_pass SRCS modify_op_lock_and_record_event_pass.cc DEPS computation_op_handle op_graph_view multi_devices_helper) diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 6345ba3359..ef2b4131bf 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -13,13 +13,19 @@ // limitations under the License. 
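The control flow restructured in memory_optimize_pass.cc above boils down to: try to map each reusable output onto a best-fit cached node, then refill the cache with variables whose lifetime ends at the current op. A self-contained toy sketch in Python; the class and helper names are invented here and the best-fit heuristic is deliberately simplified:

    class ToyPool(object):
        """Stand-in for the pass's pool of reusable nodes: (name, numel) pairs."""
        def __init__(self):
            self.nodes = []
        def insert(self, node):
            self.nodes.append(node)
            self.nodes.sort(key=lambda n: n[1])       # keep smallest blocks first
        def erase(self, node):
            self.nodes.remove(node)
        def __contains__(self, node):
            return node in self.nodes
        def find_best_fit(self, var):
            for node in self.nodes:                   # smallest block that still fits
                if node[1] >= var[1]:
                    return node
            return None

    def reuse_step(op_outputs, live_in, live_out, used_by_op, skip_set, pool, renames):
        for var in op_outputs:                        # var is a (name, numel) pair
            if var[0] in skip_set or var[0] in used_by_op:
                continue
            cache = pool.find_best_fit(var)
            if cache is None or cache[0] == var[0]:   # no fit, or cascade reuse of itself
                continue
            renames[var[0]] = cache[0]                # var now reuses cache's memory
            pool.erase(cache)
        for var in live_in - live_out:                # vars dying here refill the pool
            if var not in pool:
                pool.insert(var)

    pool, renames = ToyPool(), {}
    pool.insert(('fc_0.tmp_0', 4096))
    reuse_step(op_outputs=[('relu_0.tmp_0', 2048)], live_in={('x', 1024)},
               live_out=set(), used_by_op=set(), skip_set=set(),
               pool=pool, renames=renames)
    print(renames)                                    # {'relu_0.tmp_0': 'fc_0.tmp_0'}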
#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include #include #include -#include +#include #include #include #include #include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/platform/cpu_info.h" + +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/gpu_info.h" +#endif // PADDLE_WITH_CUDA namespace paddle { namespace framework { @@ -230,6 +236,27 @@ ir::Node* OrderedSet::FindBestFitNode(ir::Node* var) const { return found_node; } +ir::Node* OrderedSet::FindNextBestFitNode(ir::Node* var, ir::Node* prev) const { + ir::Node* found_node = nullptr; + NodeComparator functor; + auto it = + std::find_if(nodes_.begin(), nodes_.end(), [&](const NodeVector& v) { + if (v.front() == prev) + return true; + else + return false; + }); + PADDLE_ENFORCE(it != nodes_.end(), "Not found previous in node list!"); + for (it = std::next(it); it != nodes_.end(); ++it) { + auto& candidate = it->front(); + if (functor(var, candidate)) { + found_node = candidate; + break; + } + } + return found_node; +} + bool OrderedSet::Has(ir::Node* var) const { if (mark_table_.count(var->Name())) { auto& node_in_samename = mark_table_.at(var->Name()); @@ -274,14 +301,35 @@ bool NodeCanReused(ir::Node* node) { return flag; } +int MinChunkSize() { + int size{0}; +#ifdef PADDLE_WITH_CUDA + size = platform::GpuMinChunkSize(); +#else + size = platform::CpuMinChunkSize(); +#endif // PADDLE_WITH_CUDA + return size; +} + bool NodeCanReused(const VarDesc& node) { auto type = node.GetType(); + // only these types holds bulk of gpu memory if (!(type == proto::VarType::LOD_TENSOR || type == proto::VarType::SELECTED_ROWS || type == proto::VarType::LOD_TENSOR_ARRAY)) { return false; } - if (node.Persistable() || node.GetShape().empty()) { + // persistable variable is parameter + if (node.Persistable()) { + return false; + } + // shape < min_chunk_size is meaningless. + // further more, fetched loss always has size = 1 + // which should not be reused. + auto shape = node.GetShape(); + int size = std::abs( + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies())); + if (shape.empty() || size < MinChunkSize()) { return false; } // vars can be @EMPTY@, @LR_DECAY_REUSE_ID@. For example, while_grad diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index 0bfaf827fe..e17030b2ab 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -62,6 +62,7 @@ class OrderedSet { } // find the bestfit shape node block with var. ir::Node* FindBestFitNode(ir::Node* var) const; + ir::Node* FindNextBestFitNode(ir::Node* var, ir::Node* prev) const; // map store non-const iterator, can not promise const int GetNodeIndexInPool(ir::Node* var); // pool all node to string diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 1574d78440..2f9e2e662b 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -76,6 +76,13 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { ir::Node* cache = pool_.FindBestFitNode(var); + while (cache != nullptr && var->Name() == cache->Name()) { + VLOG(3) << "The same cache variable is cascade reused." << var->Name() + << " is re-filled to the pool after" + << "the reused op is finished. 
Current op can not " + << "replace it again. Skip this candidate."; + cache = pool_.FindNextBestFitNode(var, cache); + } if (var->Name() == FLAGS_memory_optimize_debug) { VLOG(3) << "start match var " << DebugString(var) << " of op " << op->Name(); @@ -85,14 +92,6 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } if (cache != nullptr) { - if (var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." - << var->Name() << " is re-filled to the pool after" - << "the reused op is finished. Current op can not " - << "replace it again. Skip this candidate."; - continue; - } - int node_idx_in_pool = pool_.GetNodeIndexInPool(cache); VLOG(3) << string::Sprintf( "!!! %s, %s => %s, cache idx %d, pool size %d", From 51d1e8cd065001a0ef96a81da748760c0b1b8e14 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Wed, 13 Feb 2019 20:04:54 +0800 Subject: [PATCH 016/117] add details. test=develop --- python/paddle/fluid/compiler.py | 5 ++++- python/paddle/fluid/parallel_executor.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index ef02429428..7c8c4a7e06 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -177,7 +177,10 @@ class CompiledProgram(object): # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. - self._build_strategy.enable_inplace = False if self._program._is_mem_optimized else True + if build_strategy.memory_optimize is None: + build_strategy.memory_optimize = False if main._is_mem_optimized else True + if build_strategy.enable_inplace is None: + build_strategy.enable_inplace = False if main._is_mem_optimized else True if self._build_strategy.num_trainers > 1 and trainers_endpoints: assert self._build_strategy.num_trainers == len( diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 22212ae9a2..8586670c24 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -148,6 +148,8 @@ class ParallelExecutor(object): else framework.default_main_program() # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. + if build_strategy.memory_optimize is None: + build_strategy.memory_optimize = False if main._is_mem_optimized else True if build_strategy.enable_inplace is None: build_strategy.enable_inplace = False if main._is_mem_optimized else True scope = scope if scope is not None else executor.global_scope() From 283573c6aa8d3e6d6f72c6f68c11b553095d64bc Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Thu, 14 Feb 2019 10:36:55 +0800 Subject: [PATCH 017/117] add details. 
test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 4 ++-- paddle/fluid/framework/details/inplace_op_pass.cc | 2 +- python/paddle/fluid/compiler.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 5e8ffa4f51..6b1957ae59 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -51,9 +51,9 @@ cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope d cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) if(WITH_GPU) -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) else() -nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) +nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) endif() cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/details/inplace_op_pass.cc index b0c5968499..c91fc81b2d 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/details/inplace_op_pass.cc @@ -49,7 +49,7 @@ DEFINE_bool( "If this option turns on, only these op in whitelist can be inplaced." "If it turns off, all of the running op can be candidate of inplaced op." "Such as scale, elementwise_add" - "By default, it's turned on"); + "By default, it's turned off"); DECLARE_string(memory_optimize_debug); diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 7c8c4a7e06..b24cec044f 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -177,10 +177,10 @@ class CompiledProgram(object): # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. 
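The Python changes above let an explicit BuildStrategy setting win while deriving a default from whether the Python-side memory_optimize transform was already applied. A minimal sketch of setting the two flags explicitly; the tiny network is only a placeholder:

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[8], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

    build_strategy = fluid.BuildStrategy()
    build_strategy.memory_optimize = True     # C++-side memory reuse pass
    build_strategy.enable_inplace = True      # in-place op pass
    # Left as None, both fall back to "on unless fluid.memory_optimize() was used".

    compiled = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy)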
- if build_strategy.memory_optimize is None: - build_strategy.memory_optimize = False if main._is_mem_optimized else True - if build_strategy.enable_inplace is None: - build_strategy.enable_inplace = False if main._is_mem_optimized else True + if self._build_strategy.memory_optimize is None: + self._build_strategy.memory_optimize = False if main._is_mem_optimized else True + if self._build_strategy.enable_inplace is None: + self._build_strategy.enable_inplace = False if main._is_mem_optimized else True if self._build_strategy.num_trainers > 1 and trainers_endpoints: assert self._build_strategy.num_trainers == len( From daac6a05f590e33d4d50d71a97378fe57331f33e Mon Sep 17 00:00:00 2001 From: Wojciech Uss Date: Thu, 14 Feb 2019 08:19:20 +0100 Subject: [PATCH 018/117] Removed duplicated code This also fixes linking to libpaddle_fluid.so built in debug mode test=develop --- .../analysis/ir_passes/subgraph_detector.cc | 71 ------------------- .../analysis/ir_passes/subgraph_detector.h | 27 +------ 2 files changed, 1 insertion(+), 97 deletions(-) diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc index a64f85ee9a..96befe7f8a 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc @@ -460,77 +460,6 @@ inline bool CheckNodeIndegreeEquals(const Node &node, size_t n) { return node.inputs.size() == n; } -NodesTSIterator::NodesTSIterator(const std::vector &source) { - PADDLE_ENFORCE(!source.empty(), - "Start points of topological sorting should not be empty!"); - // CHECK all the inputs' in-degree is 0 - for (auto *node : source) { - PADDLE_ENFORCE(CheckNodeIndegreeEquals(*node, 0)); - } - - std::unordered_set visited; - std::unordered_set to_visit{source.begin(), source.end()}; - - std::vector inlink_visited; - while (!to_visit.empty()) { - std::vector queue(to_visit.begin(), to_visit.end()); - for (auto *p : queue) { - if (Agent(p).deleted()) { - visited.insert(p); - to_visit.erase(p); - } - - inlink_visited.clear(); - - std::copy_if(p->inputs.begin(), p->inputs.end(), - std::back_inserter(inlink_visited), - [&](Node *x) -> bool { return visited.count(x) != 0; }); - - if (inlink_visited.size() == p->inputs.size()) { - sorted_.push_back(p); - for (auto *_ : p->outputs) { - if (!visited.count(_)) { - to_visit.insert(_); - } - } - - to_visit.erase(p); - visited.insert(p); - } - } - } -} - -NodesTSIterator::NodesTSIterator(const NodesTSIterator &other) - : sorted_(other.sorted_), cursor_(other.cursor_) {} - -Node &NodesTSIterator::operator*() { - PADDLE_ENFORCE_LT(cursor_, sorted_.size()); - return *sorted_[cursor_]; -} - -NodesTSIterator &NodesTSIterator::operator++() { - if (++cursor_ >= sorted_.size()) { - sorted_.clear(); - cursor_ = 0; - } - return *this; -} -NodesTSIterator &NodesTSIterator::operator=(const NodesTSIterator &other) { - cursor_ = other.cursor_; - sorted_ = other.sorted_; - return *this; -} - -bool NodesTSIterator::operator==(const NodesTSIterator &other) { - return sorted_ == other.sorted_ && cursor_ == other.cursor_; -} - -Node *NodesTSIterator::operator->() { - PADDLE_ENFORCE_LT(cursor_, sorted_.size()); - return sorted_[cursor_]; -} - } // namespace analysis } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h index ea88edd042..5d11c217b6 100644 --- 
a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h @@ -30,6 +30,7 @@ namespace inference { namespace analysis { using framework::ir::Graph; +using framework::ir::NodesTSIterator; const char kIsFunctionNode[] = "__is_function_node__"; const char kFunctionNodeSubGraph[] = "__function_node_sub_graph__"; @@ -132,32 +133,6 @@ struct Agent { framework::ir::Node *x_; }; -// Topological sorting iterator on nodes. -struct NodesTSIterator - : public std::iterator { - NodesTSIterator() = default; - explicit NodesTSIterator(const std::vector &source); - NodesTSIterator(NodesTSIterator &&other) - : sorted_(std::move(other.sorted_)), cursor_(other.cursor_) { - other.cursor_ = 0; - } - NodesTSIterator(const NodesTSIterator &other); - - framework::ir::Node &operator*(); - NodesTSIterator &operator++(); - // TODO(Superjomn) current implementation just compare the first - // element, need to compare the graph and all the elements in the queue and - // set. - NodesTSIterator &operator=(const NodesTSIterator &other); - bool operator==(const NodesTSIterator &other); - bool operator!=(const NodesTSIterator &other) { return !(*this == other); } - framework::ir::Node *operator->(); - - private: - std::vector sorted_; - size_t cursor_{0}; -}; - // The nodes those have no input will be treated as start points. static std::vector ExtractStartPoints(const Graph &g) { std::vector result; From f3463ecb6ee2b791c7ccd3eb64f7d317f9c30519 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Feb 2019 16:19:02 +0800 Subject: [PATCH 019/117] refine pg execution --- .../fluid/framework/details/build_strategy.cc | 10 +- .../details/multi_devices_graph_pass.cc | 54 +++++---- .../details/multi_devices_graph_pass.h | 16 ++- .../framework/details/multi_devices_helper.h | 11 +- .../fluid/framework/details/op_handle_base.h | 3 + .../details/parallel_ssa_graph_executor.cc | 65 ++++++++++- .../details/parallel_ssa_graph_executor.h | 11 ++ .../details/threaded_ssa_graph_executor.cc | 4 +- paddle/fluid/framework/ir/graph.h | 26 +++-- paddle/fluid/framework/ir/graph_helper.h | 4 +- paddle/fluid/framework/parallel_executor.cc | 81 +++++++------ .../unittests/parallel_executor_test_base.py | 3 +- .../unittests/test_parallel_executor_pg.py | 107 ++++++++++++++++++ 13 files changed, 309 insertions(+), 86 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index ce5731a1f4..10855eacff 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -35,8 +35,8 @@ static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) { // Should fix the allreduce op order if scheduling // them in multiple threads or processes to avoid hang. return (!strategy.enable_sequential_execution_ && - strategy.num_trainers_ > 1) || - strategy.enable_parallel_graph_; + strategy.num_trainers_ > 1) && + !strategy.enable_parallel_graph_; } class ParallelExecutorPassBuilder : public ir::PassBuilder { @@ -106,7 +106,9 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { } // Verify that the graph is correct for multi-device executor. 
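For reference, the topological-sort iterator deleted above (its shared framework::ir::NodesTSIterator replacement behaves the same way) emits a node once all of its inputs have been emitted, starting from zero in-degree nodes. A rough Python equivalent over a plain adjacency-list DAG, not the real ir::Node API:

    def topo_order(inputs, outputs, start_points):
        """inputs/outputs: dicts mapping a node to the nodes before/after it."""
        assert start_points, "Start points of topological sorting should not be empty!"
        assert all(not inputs[n] for n in start_points)      # in-degree must be 0
        visited, to_visit, order = set(), set(start_points), []
        while to_visit:                                      # assumes the graph is a DAG
            for node in list(to_visit):
                if all(src in visited for src in inputs[node]):
                    order.append(node)                       # all producers already emitted
                    visited.add(node)
                    to_visit.discard(node)
                    to_visit.update(d for d in outputs[node] if d not in visited)
        return order

    # a -> b -> d and a -> c -> d
    inputs = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
    outputs = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
    print(topo_order(inputs, outputs, ['a']))                # e.g. ['a', 'b', 'c', 'd']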
- AppendPass("multi_devices_check_pass"); + auto multi_devices_pass = AppendPass("multi_devices_check_pass"); + multi_devices_pass->Set(kEnablePG, + new bool(strategy.enable_parallel_graph_)); if (SeqOnlyAllReduceOps(strategy)) { AppendPass("all_reduce_deps_pass"); @@ -180,6 +182,8 @@ std::unique_ptr BuildStrategy::Apply( &local_scopes); pass->Erase(kNRanks); pass->Set(kNRanks, new size_t(nranks)); + pass->Erase(kEnablePG); + pass->Set(kEnablePG, new bool(true)); #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr; diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 75f922d2cc..dcceaa93d9 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -36,11 +36,6 @@ namespace framework { namespace details { namespace { -// TODO(panyx0718): Clean this up as well. -// all operators. NOTE that even we use a vector here, the operators is -// unordered. -typedef std::vector GraphOps; -const char kGraphOps[] = "ops"; bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { return boost::get( @@ -206,7 +201,7 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( auto &g_name = backward_vars[i + 1]; VLOG(10) << "Bcast " << g_name << " for parameter " << p_name; - InsertCollectiveOp(&result, p_name, g_name); + InsertCollectiveOp(&result, node, p_name, g_name); } } catch (boost::bad_get e) { } @@ -226,7 +221,7 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( * Only variables should be the leaves of graph. */ AddOutputToLeafOps(&result); - result.Erase(kGraphOps); + // result.Erase(kGraphOps); return graph; } @@ -391,20 +386,34 @@ void MultiDevSSAGraphBuilderBase::CreateComputationalOp(ir::Graph *result, } void MultiDevSSAGraphBuilderBase::CreateAllReduceOp( - ir::Graph *result, const std::string &og) const { + ir::Graph *result, ir::Node *node, const std::string &og) const { + OpHandleBase *op_handle = nullptr; + + auto append_allreduce_op = [&]( + std::vector &scopes, + std::vector &places) -> OpHandleBase * { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - local_scopes_, places_, nccl_ctxs_)); + result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places, nccl_ctxs_)); #else - result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - local_scopes_, places_)); + result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places)); #endif - auto *op_handle = result->Get(kGraphOps).back(); + return result->Get(kGraphOps).back(); + }; + + if (!strategy_.enable_parallel_graph_) + op_handle = append_allreduce_op(local_scopes_, places_); for (size_t i = 0; i < places_.size(); ++i) { - auto &p = places_[i]; + auto p = places_[i]; + std::vector ss{local_scopes_[i]}; + std::vector ps{p}; + if (strategy_.enable_parallel_graph_) + op_handle = append_allreduce_op(ss, ps); + SetCommunicationContext(op_handle, p); auto &vars = result->Get(kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); @@ -501,13 +510,13 @@ bool MultiDevSSAGraphBuilderBase::IsSparseGradient( } void 
AllReduceSSAGraphBuilder::InsertCollectiveOp( - ir::Graph *result, const std::string &p_name, + ir::Graph *result, ir::Node *node, const std::string &p_name, const std::string &g_name) const { if (IsSparseGradient(g_name)) { CreateReduceOp(result, g_name, 0); CreateBroadcastOp(result, g_name, 0); } else { - CreateAllReduceOp(result, g_name); + CreateAllReduceOp(result, node, g_name); } } @@ -580,7 +589,7 @@ void ReduceSSAGraphBuilder::ResetState() const { } void ReduceSSAGraphBuilder::InsertCollectiveOp( - ir::Graph *result, const std::string &p_name, + ir::Graph *result, ir::Node *node, const std::string &p_name, const std::string &g_name) const { size_t cur_device_id = GetAppropriateDeviceID({g_name}); CreateReduceOp(result, g_name, cur_device_id); @@ -900,7 +909,7 @@ int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, return op_dev_id; } -void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, +void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, ir::Node *node, const std::string &p_name, const std::string &g_name) const { size_t cur_device_id = 0; @@ -915,7 +924,7 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, CreateReduceOp(result, g_name, 0); CreateBroadcastOp(result, g_name, 0); } else { - CreateAllReduceOp(result, g_name); + CreateAllReduceOp(result, node, g_name); } break; default: @@ -966,7 +975,8 @@ static int MultiDevSSAGraphBuilderRegister(const std::string &builder_mode) { .RequirePassAttr(paddle::framework::details::kPlaces) \ .RequirePassAttr(paddle::framework::details::kLocalScopes) \ .RequirePassAttr(paddle::framework::details::kStrategy) \ - .RequirePassAttr(paddle::framework::details::kNRanks) + .RequirePassAttr(paddle::framework::details::kNRanks) \ + .RequirePassAttr(paddle::framework::details::kEnablePG) REGISTER_MULTI_DEVICES_PASS(reduce_mode_multi_devices_pass, paddle::framework::details::ReduceSSAGraphBuilder); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h index 6d4386538e..e3c1fe711c 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.h +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h @@ -36,6 +36,7 @@ constexpr char kPlaces[] = "places"; constexpr char kLocalScopes[] = "local_scopes"; constexpr char kStrategy[] = "strategy"; constexpr char kNRanks[] = "nranks"; +constexpr char kEnablePG[] = "enable_pg"; class MultiDevSSAGraphBuilderBase : public ir::Pass { protected: @@ -46,7 +47,8 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { virtual std::vector SortOperations(const ir::Graph &graph) const; - virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, + const std::string &p_name, const std::string &g_name) const = 0; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const = 0; @@ -75,7 +77,8 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { bool IsSparseGradient(const std::string &og) const; - void CreateAllReduceOp(ir::Graph *result, const std::string &og) const; + void CreateAllReduceOp(ir::Graph *result, ir::Node *node, + const std::string &og) const; void CreateBroadcastOp(ir::Graph *result, const std::string &p_name, size_t src_dev_id) const; @@ -106,7 +109,8 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { class AllReduceSSAGraphBuilder : public MultiDevSSAGraphBuilderBase { protected: - virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, + 
virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, + const std::string &p_name, const std::string &g_name) const; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const { @@ -135,7 +139,8 @@ class ReduceSSAGraphBuilder : public BalanceVarSSAGraphBuilder { protected: virtual void Init() const; - virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, + const std::string &p_name, const std::string &g_name) const; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const; @@ -164,7 +169,8 @@ class DistSSAGraphBuilder : public BalanceVarSSAGraphBuilder { virtual void InsertPostprocessOps(ir::Graph *result) const; - virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, + const std::string &p_name, const std::string &g_name) const; virtual void ResetState() const; diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h index 1a2b75fbc0..5331b750eb 100644 --- a/paddle/fluid/framework/details/multi_devices_helper.h +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -36,13 +36,20 @@ namespace details { // map from variable name to variables. The variables, who have the same name, // will have a differsent version. The offset in the // `std::vector` is the version of varaibles. -typedef std::vector>> +typedef std::vector>> GraphVars; const char kGraphVars[] = "vars"; // aux variables to represent dependency. Useful to resolve data hazard. -typedef std::unordered_set GraphDepVars; +typedef std::unordered_set GraphDepVars; const char kGraphDepVars[] = "dep_vars"; + +// TODO(panyx0718): Clean this up as well. +// all operators. NOTE that even we use a vector here, the operators is +// unordered. +typedef std::vector GraphOps; +const char kGraphOps[] = "ops"; + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index b1a82e8771..e0aa352e95 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -70,6 +70,9 @@ class OpHandleBase { auto it = dev_ctxes_.find(place); return it != dev_ctxes_.end() ? it->second : nullptr; } + const std::map &DeviceContext() { + return dev_ctxes_; + } void SetDeviceContext(platform::Place place, platform::DeviceContext *ctx_) { dev_ctxes_[place] = ctx_; diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index 128aaa33a2..41bfe99cab 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -13,11 +13,74 @@ // limitations under the License. 
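The GraphVars / GraphDepVars / GraphOps attributes declared above nest as device -> name -> versions; a rough Python picture of that layout, with variable names invented for illustration:

    graph_vars = [                                    # index: device id
        {
            'fc_0.w_0':   ['fc_0.w_0@v0'],            # index in this list: version
            'fc_0.tmp_0': ['fc_0.tmp_0@v0', 'fc_0.tmp_0@v1'],
        },
        # ... one dict per place ...
    ]
    graph_dep_vars = set()                            # dummy vars, dependency edges only
    graph_ops = []                                    # every op handle, unordered

    print(graph_vars[0]['fc_0.tmp_0'][-1])            # newest version on device 0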
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h" +#include "paddle/fluid/framework/ir/graph_helper.h" namespace paddle { namespace framework { namespace details { +std::vector> SeparateMultiDevicesGraph( + const std::vector &places, + std::unique_ptr graph) { + std::vector> graphs; + graphs.reserve(places.size()); + for (size_t i = 0; i < places.size(); ++i) { + ProgramDesc empty; + graphs.emplace_back(std::unique_ptr(new ir::Graph(empty))); + auto &g = graphs.back(); + g->Set(kGraphVars, new GraphVars(1UL)); + g->Set(kGraphDepVars, new GraphDepVars); + g->Set(kGraphOps, new GraphOps); + } + + for (auto &op : graph->Get(kGraphOps)) { + auto &dev_ctx = op->DeviceContext(); + auto &p = dev_ctx.begin()->first; +#ifdef PADDLE_WITH_CUDA + int dev_id = boost::get(p).device; + auto &dev_ops = graphs[dev_id]->Get(kGraphOps); + auto &dev_dummys = graphs[dev_id]->Get(kGraphDepVars); + dev_ops.emplace_back(op); + graphs[dev_id]->AddNode(graph->ReleaseNode(op->Node()).release()); + + for (auto &var : op->Inputs()) { + auto dummy_ptr = dynamic_cast(var); + if (dummy_ptr) { + dev_dummys.insert(var); + if (graph->Nodes().count(var->Node())) + graphs[dev_id]->AddNode(graph->ReleaseNode(var->Node()).release()); + } + } + for (auto &var : op->Outputs()) { + auto dummy_ptr = dynamic_cast(var); + if (dummy_ptr) { + dev_dummys.insert(var); + if (graph->Nodes().count(var->Node())) + graphs[dev_id]->AddNode(graph->ReleaseNode(var->Node()).release()); + } + } +#else + PADDLE_THROW("Parallel Graph Execution only support CUDAPlace."); +#endif + } + + for (size_t dev_id = 0; dev_id < places.size(); ++dev_id) { + auto &dev_vars = graphs[dev_id]->Get(kGraphVars)[0]; + auto &origin_vars = graph->Get(kGraphVars)[dev_id]; + for (auto &name_pair : origin_vars) { + dev_vars.emplace(name_pair.first, name_pair.second); + for (auto &version_pair : name_pair.second) { + if (graph->Nodes().count(version_pair->Node())) { + graphs[dev_id]->AddNode( + graph->ReleaseNode(version_pair->Node()).release()); + } + } + } + } + + return graphs; +} + ParallelSSAGraphExecutor::ParallelSSAGraphExecutor( const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, @@ -37,7 +100,7 @@ ParallelSSAGraphExecutor::ParallelSSAGraphExecutor( << " to run the operators of the graph on each device."; for (size_t i = 0; i < places.size(); ++i) { executors_.emplace_back(new details::ThreadedSSAGraphExecutor( - strategy_, {local_scopes_[i]}, {places_[i]}, std::move(graphs_[i]))); + strategy_, local_scopes_, {places_[i]}, std::move(graphs_.at(i)))); } } diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h index c00c5bc2d1..e3abd23753 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h @@ -14,16 +14,24 @@ #pragma once +#include +#include #include #include #include "ThreadPool.h" +#include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" +#include "paddle/fluid/framework/ir/graph.h" namespace paddle { namespace framework { namespace details { +std::vector> SeparateMultiDevicesGraph( + const std::vector &places, + std::unique_ptr graph); + class ParallelSSAGraphExecutor : public SSAGraphExecutor { public: ParallelSSAGraphExecutor(const ExecutionStrategy &strategy, @@ -31,11 +39,14 @@ class ParallelSSAGraphExecutor : public SSAGraphExecutor { const 
std::vector &places, std::vector> &&graphs); ~ParallelSSAGraphExecutor() final = default; + const ir::Graph &Graph() const override { return *graphs_[0]; } FeedFetchList Run(const std::vector &fetch_tensors) override; private: + // std::vector> SeparateMultiDevicesGraph(); + ExecutionStrategy strategy_; std::vector local_scopes_; std::unique_ptr<::ThreadPool> pool_{nullptr}; diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 677a293794..c0edad6f74 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -56,10 +56,10 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( } } } + for (auto &var : graph_->Get(details::kGraphDepVars)) { InsertPendingVar(&pending_vars, ready_vars.get(), var); } - for (auto &op : ir::FilterByNodeWrapper(*graph_)) { if (op->Inputs().empty()) { // Special case, Op has no input. ready_ops.insert(op); @@ -219,7 +219,7 @@ void ThreadedSSAGraphExecutor::RunOp( VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; ready_var_q->Extend(op->Outputs()); - VLOG(10) << op << " " << op->Name() << "Signal posted"; + VLOG(10) << op << " " << op->Name() << " Signal posted"; } catch (...) { exception_holder_.Catch(std::current_exception()); } diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 8bb3c27bdd..07cbfc74ff 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -167,6 +167,14 @@ class Graph { return ret; } + std::unique_ptr ReleaseNode(ir::Node *node) { + std::unique_ptr ret; + ret.reset(nodes_.at(node).release()); + nodes_.erase(node); + node_set_.erase(node); + return ret; + } + void RemoveNode(ir::Node *node) { PADDLE_ENFORCE(node_set_.find(node) != node_set_.end()); node_set_.erase(node); @@ -183,13 +191,6 @@ class Graph { return nullptr; } - void ResolveHazard( - const std::map> &var_nodes); - - private: - std::map> InitFromProgram( - const ProgramDesc &program); - // This method takes ownership of `node`. ir::Node *AddNode(ir::Node *node) { PADDLE_ENFORCE(node_set_.find(node) == node_set_.end()); @@ -198,6 +199,17 @@ class Graph { return node; } + bool ContainNode(ir::Node *node) { + return node_set_.find(node) != node_set_.end(); + } + + void ResolveHazard( + const std::map> &var_nodes); + + private: + std::map> InitFromProgram( + const ProgramDesc &program); + // NOTE: program_ shouldn't be exposed to user. const ProgramDesc program_; std::map attrs_; diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h index fba4936f2c..726cf8ec52 100644 --- a/paddle/fluid/framework/ir/graph_helper.h +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -59,7 +59,9 @@ template std::vector FilterByNodeWrapper(const Graph &graph) { std::vector ret; for (ir::Node *n : graph.Nodes()) { - if (n->IsWrappedBy()) ret.push_back(&n->Wrapper()); + if (n->IsWrappedBy()) { + ret.push_back(&n->Wrapper()); + } } return ret; } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index f61c9e3a91..abe241ed22 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -26,6 +26,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h" #include "paddle/fluid/framework/details/reference_count_pass_helper.h" #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" +#include "paddle/fluid/framework/details/sequential_execution_pass.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/platform/profiler.h" @@ -201,7 +202,6 @@ ParallelExecutor::ParallelExecutor( member_->use_all_reduce_ = build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce; member_->nranks_ = build_strategy.num_trainers_ * places.size(); - if (!member_->use_all_reduce_) { PADDLE_ENFORCE(places.size() > 1, "If you set build_strategy.reduce with 'Reduce'," @@ -229,9 +229,10 @@ ParallelExecutor::ParallelExecutor( // choice the execution strategy. build_strategy.enable_parallel_graph_ = EnableParallelGraphExecution(main_program, exec_strategy, build_strategy); - - VLOG(1) << "Enable ParallelGraph Execution: " - << build_strategy.enable_parallel_graph_; + if (build_strategy.enable_parallel_graph_) + VLOG(0) << "The Executor would execute the graph by ParallelGraph " + "Execution which can get better performance," + << "you can force it off by env FLAGS_enable_parallel_graph=0"; if (member_->use_cuda_) { // Bcast Parameters to all GPUs @@ -265,58 +266,42 @@ ParallelExecutor::ParallelExecutor( // Step 2. Convert main_program to SSA form and dependency graph. Also, insert // ncclOp - std::vector> graphs; + std::unique_ptr graph; #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - if (build_strategy.enable_parallel_graph_) { - for (size_t i = 0; i < member_->places_.size(); ++i) { - std::unique_ptr graph = build_strategy.Apply( - main_program, {member_->places_[i]}, loss_var_name, - {member_->local_scopes_[i]}, member_->nranks_, member_->use_cuda_, - member_->nccl_ctxs_.get()); - graphs.push_back(std::move(graph)); - } - } else { - std::unique_ptr graph = build_strategy.Apply( - main_program, member_->places_, loss_var_name, member_->local_scopes_, - member_->nranks_, member_->use_cuda_, member_->nccl_ctxs_.get()); - graphs.push_back(std::move(graph)); - } + graph = build_strategy.Apply(main_program, member_->places_, loss_var_name, + member_->local_scopes_, member_->nranks_, + member_->use_cuda_, member_->nccl_ctxs_.get()); #else - std::unique_ptr graph = build_strategy.Apply( - main_program, member_->places_, loss_var_name, member_->local_scopes_, - member_->nranks_, member_->use_cuda_); - graphs.push_back(std::move(graph)); + graph = build_strategy.Apply(main_program, member_->places_, loss_var_name, + member_->local_scopes_, member_->nranks_, + member_->use_cuda_); #endif auto max_memory_size = GetEagerDeletionThreshold(); if (max_memory_size >= 0) { - for (size_t i = 0; i < graphs.size(); ++i) { - graphs[i] = member_->PrepareGCAndRefCnts( - std::move(graphs[i]), static_cast(max_memory_size)); - } + graph = member_->PrepareGCAndRefCnts(std::move(graph), + static_cast(max_memory_size)); } // Step 3. Create vars in each scope. Passes may also create new vars. 
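A minimal sketch of toggling this path from Python through the flag named in the log message above; the unit test added later in this patch sets it the same way, before paddle.fluid.core is first imported:

    import os
    os.environ['FLAGS_enable_parallel_graph'] = '1'   # '0' forces the path off

    import paddle.fluid as fluid                      # flag is read when core loads
    # Build and run the program with CompiledProgram / ParallelExecutor as usual;
    # when the build strategy allows it, each place now runs its own graph copy.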
// skip control vars and empty vars std::vector var_infos; - for (auto &graph : graphs) { - for (auto &node : graph->Nodes()) { - if (node->IsVar() && !node->IsCtrlVar() && node->Var()) { - var_infos.emplace_back(); - var_infos.back().name_ = node->Var()->Name(); - var_infos.back().type_ = node->Var()->GetType(); - var_infos.back().persistable_ = node->Var()->Persistable(); - } + for (auto &node : graph->Nodes()) { + if (node->IsVar() && !node->IsCtrlVar() && node->Var()) { + var_infos.emplace_back(); + var_infos.back().name_ = node->Var()->Name(); + var_infos.back().type_ = node->Var()->GetType(); + var_infos.back().persistable_ = node->Var()->Persistable(); } } // If the loss_var_name is given, the number of graph should be only one. if (loss_var_name.size()) { - size_t graph_num = ir::GraphNum(*graphs[0]); + size_t graph_num = ir::GraphNum(*graph); if (graph_num > 1) { LOG(WARNING) << "The number of graph should be only one, " "but the current graph has " - << ir::GraphNum(*graphs[0]) + << ir::GraphNum(*graph) << " sub_graphs. If you want to see the nodes of the " "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' " "to specify the output dir. NOTES: if you not do training, " @@ -325,18 +310,30 @@ ParallelExecutor::ParallelExecutor( } if (build_strategy.enable_parallel_graph_) { + auto parallel_graph = + details::SeparateMultiDevicesGraph(member_->places_, std::move(graph)); + auto seq_allreduce_pass = + ir::PassRegistry::Instance().Get("all_reduce_deps_pass"); + seq_allreduce_pass->Erase(details::kAllOpDescs); + seq_allreduce_pass->Set>( + details::kAllOpDescs, + new std::vector(main_program.Block(0).AllOps())); + for (size_t i = 0; i < parallel_graph.size(); ++i) { + parallel_graph[i] = + seq_allreduce_pass->Apply(std::move(parallel_graph[i])); + } member_->executor_.reset(new details::ParallelSSAGraphExecutor( exec_strategy, member_->local_scopes_, member_->places_, - std::move(graphs))); + std::move(parallel_graph))); } else { if (exec_strategy.type_ == ExecutionStrategy::kDefault) { member_->executor_.reset(new details::ThreadedSSAGraphExecutor( exec_strategy, member_->local_scopes_, member_->places_, - std::move(graphs[0]))); + std::move(graph))); } else { member_->executor_.reset(new details::FastThreadedSSAGraphExecutor( exec_strategy, member_->local_scopes_, member_->places_, - std::move(graphs[0]))); + std::move(graph))); } } @@ -487,8 +484,8 @@ bool ParallelExecutor::EnableParallelGraphExecution( } } - if (!member_->use_all_reduce_ || !member_->use_cuda_) - enable_parallel_graph = false; + // if (!member_->use_all_reduce_ || !member_->use_cuda_) + if (!member_->use_all_reduce_) enable_parallel_graph = false; if (build_strategy.enable_sequential_execution_ || exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index fdacd241f9..f14094a7b3 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -72,6 +72,7 @@ class TestParallelExecutorBase(unittest.TestCase): exe.run(startup) exec_strategy = fluid.ExecutionStrategy() exec_strategy.allow_op_delay = allow_op_delay + exec_strategy.num_threads = 1 if use_fast_executor: exec_strategy.use_experimental_executor = True build_strategy = fluid.BuildStrategy() @@ -99,7 +100,7 @@ class TestParallelExecutorBase(unittest.TestCase): first_loss, = run_executor( exe=exe, 
binary=binary, feed=feed_dict, fetch_list=[loss.name]) - for i in range(iter): + for _ in range(iter): run_executor( exe=exe, binary=binary, feed=feed_dict, fetch_list=[]) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py new file mode 100644 index 0000000000..041c56fce1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_pg.py @@ -0,0 +1,107 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest + +import numpy as np +import os +os.environ['FLAGS_enable_parallel_graph'] = str(1) +import paddle.fluid.core as core +import os +import paddle.fluid as fluid +from parallel_executor_test_base import TestParallelExecutorBase + + +def simple_fc_net(use_feed): + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = img + for _ in range(4): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +class TestMNIST(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + + def _init_data(self): + np.random.seed(5) + img = np.random.random(size=[32, 784]).astype(np.float32) + label = np.ones(shape=[32, 1], dtype='int64') + return img, label + + # simple_fc + def check_simple_fc_convergence(self, use_cuda, use_reduce=False): + if use_cuda and not core.is_compiled_with_cuda(): + return + + img, label = self._init_data() + + self.check_network_convergence( + simple_fc_net, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_reduce=use_reduce) + + def test_simple_fc(self): + # use_cuda + self.check_simple_fc_convergence(True) + + def check_simple_fc_parallel_accuracy(self, use_cuda): + if use_cuda and not core.is_compiled_with_cuda(): + return + + img, label = self._init_data() + + single_first_loss, single_last_loss = self.check_network_convergence( + method=simple_fc_net, + seed=1, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_parallel_executor=False) + parallel_first_loss, parallel_last_loss = self.check_network_convergence( + method=simple_fc_net, + seed=1, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_parallel_executor=True) + + self.assertAlmostEquals( + np.mean(parallel_first_loss), + single_first_loss, + delta=1e-6, ) + self.assertAlmostEquals( + np.mean(parallel_last_loss), single_last_loss, delta=1e-6) + + def test_simple_fc_parallel_accuracy(self): + self.check_simple_fc_parallel_accuracy(True) + + +if __name__ == '__main__': + unittest.main() From 869f00ffc6697bdac73271ecbd7257f6937245c2 
Mon Sep 17 00:00:00 2001 From: liuhongyu Date: Thu, 14 Feb 2019 16:20:37 +0800 Subject: [PATCH 020/117] set lstm lstmp unused pointer to null --- paddle/fluid/operators/lstm_op.h | 4 ++++ paddle/fluid/operators/lstmp_op.h | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 7d62d2d020..289f50f52e 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -311,6 +311,10 @@ class LSTMGradKernel : public framework::OpKernel { lstm_grad.prev_state_grad = c0_g ? ordered_c0_g.data() : nullptr; } + // lstm_value.output_value not used in bp, set to null + // lstm_grad.state_active_grad not used in bp, set to null + lstm_value.output_value = nullptr; + lstm_grad.state_active_grad = nullptr; int cur_batch_size = bend - bstart; math::LstmUnitGradFunctor::compute( device_ctx, lstm_value, lstm_grad, frame_size, cur_batch_size, diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 370dd04d14..05ecd3c1ae 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -405,6 +405,11 @@ class LSTMPGradKernel : public framework::OpKernel { } int cur_batch_size = bend - bstart; + // lstm_value.output_value not used in bp, set to null + // lstm_grad.state_active_grad not used in bp, set to null + lstm_value.output_value = nullptr; + lstm_grad.state_active_grad = nullptr; + math::LstmUnitGradFunctor::compute( device_ctx, lstmp_value, lstmp_grad, frame_size, cur_batch_size, gate_act, cell_act, cand_act); From 393fa6021e78d111d9a76e52fbdd97c4e152e65d Mon Sep 17 00:00:00 2001 From: liuhongyu Date: Thu, 14 Feb 2019 16:25:29 +0800 Subject: [PATCH 021/117] set lstm lstmp unused pointer to nullptr; test=develop --- paddle/fluid/operators/lstm_op.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 289f50f52e..3f110024b2 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -311,8 +311,8 @@ class LSTMGradKernel : public framework::OpKernel { lstm_grad.prev_state_grad = c0_g ?
ordered_c0_g.data() : nullptr; } - // lstm_value.output_value not used in bp, set to null - // lstm_grad.state_active_grad not used in bp, set to null + // lstm_value.output_value not used in bp, set to nullptr + // lstm_grad.state_active_grad not used in bp, set to nullptr lstm_value.output_value = nullptr; lstm_grad.state_active_grad = nullptr; int cur_batch_size = bend - bstart; From 73005ee00dc54eff7218e1c853bdf2eb0c053723 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Feb 2019 16:37:35 +0800 Subject: [PATCH 022/117] cleanup code test=develop --- .../fluid/framework/details/build_strategy.cc | 4 ---- .../details/multi_devices_graph_pass.cc | 17 ++++++++--------- .../details/multi_devices_graph_pass.h | 16 +++++----------- .../details/parallel_ssa_graph_executor.h | 2 -- .../details/threaded_ssa_graph_executor.cc | 2 +- paddle/fluid/framework/ir/graph.h | 10 ---------- paddle/fluid/framework/ir/graph_helper.h | 4 +--- paddle/fluid/framework/parallel_executor.cc | 9 ++++----- 8 files changed, 19 insertions(+), 45 deletions(-) diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index ae17b8df75..7d2a081e3b 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -119,8 +119,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { // Verify that the graph is correct for multi-device executor. auto multi_devices_pass = AppendPass("multi_devices_check_pass"); - multi_devices_pass->Set(kEnablePG, - new bool(strategy.enable_parallel_graph_)); if (SeqOnlyAllReduceOps(strategy)) { AppendPass("all_reduce_deps_pass"); @@ -194,8 +192,6 @@ std::unique_ptr BuildStrategy::Apply( &local_scopes); pass->Erase(kNRanks); pass->Set(kNRanks, new size_t(nranks)); - pass->Erase(kEnablePG); - pass->Set(kEnablePG, new bool(true)); #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) platform::NCCLContextMap *nctx = use_cuda ? 
nccl_ctxs : nullptr; diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index dcceaa93d9..4f856c6d9e 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -201,7 +201,7 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( auto &g_name = backward_vars[i + 1]; VLOG(10) << "Bcast " << g_name << " for parameter " << p_name; - InsertCollectiveOp(&result, node, p_name, g_name); + InsertCollectiveOp(&result, p_name, g_name); } } catch (boost::bad_get e) { } @@ -386,7 +386,7 @@ void MultiDevSSAGraphBuilderBase::CreateComputationalOp(ir::Graph *result, } void MultiDevSSAGraphBuilderBase::CreateAllReduceOp( - ir::Graph *result, ir::Node *node, const std::string &og) const { + ir::Graph *result, const std::string &og) const { OpHandleBase *op_handle = nullptr; auto append_allreduce_op = [&]( @@ -510,13 +510,13 @@ bool MultiDevSSAGraphBuilderBase::IsSparseGradient( } void AllReduceSSAGraphBuilder::InsertCollectiveOp( - ir::Graph *result, ir::Node *node, const std::string &p_name, + ir::Graph *result, const std::string &p_name, const std::string &g_name) const { if (IsSparseGradient(g_name)) { CreateReduceOp(result, g_name, 0); CreateBroadcastOp(result, g_name, 0); } else { - CreateAllReduceOp(result, node, g_name); + CreateAllReduceOp(result, g_name); } } @@ -589,7 +589,7 @@ void ReduceSSAGraphBuilder::ResetState() const { } void ReduceSSAGraphBuilder::InsertCollectiveOp( - ir::Graph *result, ir::Node *node, const std::string &p_name, + ir::Graph *result, const std::string &p_name, const std::string &g_name) const { size_t cur_device_id = GetAppropriateDeviceID({g_name}); CreateReduceOp(result, g_name, cur_device_id); @@ -909,7 +909,7 @@ int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, return op_dev_id; } -void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, ir::Node *node, +void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, const std::string &p_name, const std::string &g_name) const { size_t cur_device_id = 0; @@ -924,7 +924,7 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, ir::Node *node, CreateReduceOp(result, g_name, 0); CreateBroadcastOp(result, g_name, 0); } else { - CreateAllReduceOp(result, node, g_name); + CreateAllReduceOp(result, g_name); } break; default: @@ -975,8 +975,7 @@ static int MultiDevSSAGraphBuilderRegister(const std::string &builder_mode) { .RequirePassAttr(paddle::framework::details::kPlaces) \ .RequirePassAttr(paddle::framework::details::kLocalScopes) \ .RequirePassAttr(paddle::framework::details::kStrategy) \ - .RequirePassAttr(paddle::framework::details::kNRanks) \ - .RequirePassAttr(paddle::framework::details::kEnablePG) + .RequirePassAttr(paddle::framework::details::kNRanks) REGISTER_MULTI_DEVICES_PASS(reduce_mode_multi_devices_pass, paddle::framework::details::ReduceSSAGraphBuilder); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h index e3c1fe711c..6d4386538e 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.h +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h @@ -36,7 +36,6 @@ constexpr char kPlaces[] = "places"; constexpr char kLocalScopes[] = "local_scopes"; constexpr char kStrategy[] = "strategy"; constexpr char kNRanks[] = "nranks"; -constexpr char kEnablePG[] = "enable_pg"; class MultiDevSSAGraphBuilderBase : public ir::Pass { 
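// Base pass that turns the single program graph into a multi-device SSA graph; the
// derived builders (all-reduce, reduce, dist) only customize how the collective op for
// each parameter/gradient pair is inserted, keyed purely on the p_name/g_name strings.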
protected: @@ -47,8 +46,7 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { virtual std::vector SortOperations(const ir::Graph &graph) const; - virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, - const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, const std::string &g_name) const = 0; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const = 0; @@ -77,8 +75,7 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { bool IsSparseGradient(const std::string &og) const; - void CreateAllReduceOp(ir::Graph *result, ir::Node *node, - const std::string &og) const; + void CreateAllReduceOp(ir::Graph *result, const std::string &og) const; void CreateBroadcastOp(ir::Graph *result, const std::string &p_name, size_t src_dev_id) const; @@ -109,8 +106,7 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { class AllReduceSSAGraphBuilder : public MultiDevSSAGraphBuilderBase { protected: - virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, - const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, const std::string &g_name) const; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const { @@ -139,8 +135,7 @@ class ReduceSSAGraphBuilder : public BalanceVarSSAGraphBuilder { protected: virtual void Init() const; - virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, - const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, const std::string &g_name) const; virtual bool DealWithSpecialOp(ir::Graph *result, ir::Node *node) const; @@ -169,8 +164,7 @@ class DistSSAGraphBuilder : public BalanceVarSSAGraphBuilder { virtual void InsertPostprocessOps(ir::Graph *result) const; - virtual void InsertCollectiveOp(ir::Graph *result, ir::Node *node, - const std::string &p_name, + virtual void InsertCollectiveOp(ir::Graph *result, const std::string &p_name, const std::string &g_name) const; virtual void ResetState() const; diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h index e3abd23753..c31bba17f6 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h @@ -45,8 +45,6 @@ class ParallelSSAGraphExecutor : public SSAGraphExecutor { FeedFetchList Run(const std::vector &fetch_tensors) override; private: - // std::vector> SeparateMultiDevicesGraph(); - ExecutionStrategy strategy_; std::vector local_scopes_; std::unique_ptr<::ThreadPool> pool_{nullptr}; diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index c0edad6f74..5bf414324f 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -56,10 +56,10 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( } } } - for (auto &var : graph_->Get(details::kGraphDepVars)) { InsertPendingVar(&pending_vars, ready_vars.get(), var); } + for (auto &op : ir::FilterByNodeWrapper(*graph_)) { if (op->Inputs().empty()) { // Special case, Op has no input. 
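// Such ops depend on no pending variables, so they can be scheduled immediately.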
ready_ops.insert(op); diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 0d66043a73..40baae2ffd 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -176,12 +176,6 @@ class Graph { return ret; } - void RemoveNode(ir::Node *node) { - PADDLE_ENFORCE(node_set_.find(node) != node_set_.end()); - node_set_.erase(node); - nodes_.erase(node); - } - // NOTE low performance, but simple and secure. Node *RetrieveNode(int id) { for (auto &node : nodes_) { @@ -200,10 +194,6 @@ class Graph { return node; } - bool ContainNode(ir::Node *node) { - return node_set_.find(node) != node_set_.end(); - } - void ResolveHazard( const std::map> &var_nodes); diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h index 3b95aa7b86..214de9ec7d 100644 --- a/paddle/fluid/framework/ir/graph_helper.h +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -64,9 +64,7 @@ template std::vector FilterByNodeWrapper(const Graph &graph) { std::vector ret; for (ir::Node *n : graph.Nodes()) { - if (n->IsWrappedBy()) { - ret.push_back(&n->Wrapper()); - } + if (n->IsWrappedBy()) ret.push_back(&n->Wrapper()); } return ret; } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 91d1a99886..dca1a4e530 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -478,12 +478,11 @@ bool ParallelExecutor::EnableParallelGraphExecution( } } - // if (!member_->use_all_reduce_ || !member_->use_cuda_) - if (!member_->use_all_reduce_) enable_parallel_graph = false; + if (!member_->use_all_reduce_ || !member_->use_cuda_) - if (build_strategy.enable_sequential_execution_ || - exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) - enable_parallel_graph = false; + if (build_strategy.enable_sequential_execution_ || + exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) + enable_parallel_graph = false; return enable_parallel_graph; } From ecdd1166b80627b652b948d6b8b317307ce0afb0 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Feb 2019 16:44:09 +0800 Subject: [PATCH 023/117] cleanup code test=develop --- .../framework/details/parallel_ssa_graph_executor.cc | 8 ++++---- paddle/fluid/framework/ir/graph.h | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index a7cb9adbbf..77a3318ff9 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -41,14 +41,14 @@ std::vector> SeparateMultiDevicesGraph( auto &dev_ops = graphs[dev_id]->Get(kGraphOps); auto &dev_dummys = graphs[dev_id]->Get(kGraphDepVars); dev_ops.emplace_back(op); - graphs[dev_id]->AddNode(graph->ReleaseNode(op->Node()).release()); + graphs[dev_id]->AddNode(graph->RemoveNode(op->Node()).release()); for (auto &var : op->Inputs()) { auto dummy_ptr = dynamic_cast(var); if (dummy_ptr) { dev_dummys.insert(var); if (graph->Nodes().count(var->Node())) - graphs[dev_id]->AddNode(graph->ReleaseNode(var->Node()).release()); + graphs[dev_id]->AddNode(graph->RemoveNode(var->Node()).release()); } } for (auto &var : op->Outputs()) { @@ -56,7 +56,7 @@ std::vector> SeparateMultiDevicesGraph( if (dummy_ptr) { dev_dummys.insert(var); if (graph->Nodes().count(var->Node())) - 
graphs[dev_id]->AddNode(graph->ReleaseNode(var->Node()).release()); + graphs[dev_id]->AddNode(graph->RemoveNode(var->Node()).release()); } } #else @@ -72,7 +72,7 @@ std::vector> SeparateMultiDevicesGraph( for (auto &version_pair : name_pair.second) { if (graph->Nodes().count(version_pair->Node())) { graphs[dev_id]->AddNode( - graph->ReleaseNode(version_pair->Node()).release()); + graph->RemoveNode(version_pair->Node()).release()); } } } diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index 40baae2ffd..b55a774513 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -168,7 +168,8 @@ class Graph { return ret; } - std::unique_ptr ReleaseNode(ir::Node *node) { + std::unique_ptr RemoveNode(ir::Node *node) { + PADDLE_ENFORCE(node_set_.find(node) != node_set_.end()); std::unique_ptr ret; ret.reset(nodes_.at(node).release()); nodes_.erase(node); From 029be5fda9b973ec798444b959e7b83e03ade7f1 Mon Sep 17 00:00:00 2001 From: liuhongyu Date: Thu, 14 Feb 2019 17:23:20 +0800 Subject: [PATCH 024/117] fix lstmp bug; test=develop --- paddle/fluid/operators/lstmp_op.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index 05ecd3c1ae..1f11e57dcb 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -405,10 +405,10 @@ class LSTMPGradKernel : public framework::OpKernel { } int cur_batch_size = bend - bstart; - // lstm_value.output_value not used in bp, set to null - // lstm_grad.state_active_grad not used in bp, set to null - lstm_value.output_value = nullptr; - lstm_grad.state_active_grad = nullptr; + // lstmp_value.output_value not used in bp, set to null + // lstmp_grad.state_active_grad not used in bp, set to null + lstmp_value.output_value = nullptr; + lstmp_grad.state_active_grad = nullptr; math::LstmUnitGradFunctor::compute( device_ctx, lstmp_value, lstmp_grad, frame_size, cur_batch_size, From bd0d44af2409c9900706fb5eb50c2c713a7fd083 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Feb 2019 17:51:34 +0800 Subject: [PATCH 025/117] fix build failed test=develop --- paddle/fluid/framework/details/all_reduce_deps_pass.cc | 2 -- paddle/fluid/framework/details/all_reduce_deps_pass.h | 2 ++ paddle/fluid/framework/parallel_executor.cc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/details/all_reduce_deps_pass.cc index b7d6edd389..2e20c436df 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc +++ b/paddle/fluid/framework/details/all_reduce_deps_pass.cc @@ -30,8 +30,6 @@ namespace paddle { namespace framework { namespace details { -static constexpr char kAllOpDescs[] = "all_op_descs"; - VarHandle* GetValidInput(const OpHandleBase* a) { for (auto p : a->Inputs()) { VarHandle* b = dynamic_cast(p); diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.h b/paddle/fluid/framework/details/all_reduce_deps_pass.h index e8b9108981..1637c7a7a6 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.h +++ b/paddle/fluid/framework/details/all_reduce_deps_pass.h @@ -21,6 +21,8 @@ namespace paddle { namespace framework { namespace details { +constexpr char kAllOpDescs[] = "all_op_descs"; + // TODO(gongwb): overlap allreduce with backward computation. 
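// kAllOpDescs is declared in this header so that other translation units (for example
// parallel_executor.cc, which includes it) can share the same graph attribute key.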
class AllReduceDepsPass : public ir::Pass { protected: diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index dca1a4e530..21f2e1ee3e 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -21,12 +21,12 @@ limitations under the License. */ #include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/details/all_reduce_deps_pass.h" #include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h" #include "paddle/fluid/framework/details/reference_count_pass_helper.h" #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" -#include "paddle/fluid/framework/details/sequential_execution_pass.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/platform/profiler.h" From 7cd6de37f57d05c967d829844bc819dd69ce278b Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 14 Feb 2019 18:29:12 +0800 Subject: [PATCH 026/117] fix cpu test=develop --- .../fluid/framework/details/parallel_ssa_graph_executor.cc | 4 ---- paddle/fluid/framework/parallel_executor.cc | 5 +++++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index 77a3318ff9..3433c3424e 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -36,7 +36,6 @@ std::vector> SeparateMultiDevicesGraph( for (auto &op : graph->Get(kGraphOps)) { auto &dev_ctx = op->DeviceContext(); auto &p = dev_ctx.begin()->first; -#ifdef PADDLE_WITH_CUDA int dev_id = boost::get(p).device; auto &dev_ops = graphs[dev_id]->Get(kGraphOps); auto &dev_dummys = graphs[dev_id]->Get(kGraphDepVars); @@ -59,9 +58,6 @@ std::vector> SeparateMultiDevicesGraph( graphs[dev_id]->AddNode(graph->RemoveNode(var->Node()).release()); } } -#else - PADDLE_THROW("Parallel Graph Execution only support CUDAPlace."); -#endif } for (size_t dev_id = 0; dev_id < places.size(); ++dev_id) { diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 21f2e1ee3e..dbe1bf9b29 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -304,6 +304,7 @@ ParallelExecutor::ParallelExecutor( } if (build_strategy.enable_parallel_graph_) { +#ifdef PADDLE_WITH_CUDA auto parallel_graph = details::SeparateMultiDevicesGraph(member_->places_, std::move(graph)); auto seq_allreduce_pass = @@ -319,6 +320,10 @@ ParallelExecutor::ParallelExecutor( member_->executor_.reset(new details::ParallelSSAGraphExecutor( exec_strategy, member_->local_scopes_, member_->places_, std::move(parallel_graph))); +#else + PADDLE_THROW( + "Paddle should be compiled with CUDA for ParallelGraph Execution."); +#endif } else { if (exec_strategy.type_ == ExecutionStrategy::kDefault) { member_->executor_.reset(new details::ThreadedSSAGraphExecutor( From fe7ffedc1a45a29e02ee259ba7a1781f3a2903d0 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Thu, 14 Feb 2019 12:02:53 +0000 Subject: [PATCH 027/117] test=develop, update protobuf --- cmake/external/protobuf.cmake | 4 ++-- python/requirements.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/external/protobuf.cmake 
b/cmake/external/protobuf.cmake index e05b7694dd..3da3f10d7c 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -203,7 +203,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ENDIF() SET(PROTOBUF_REPO "https://github.com/google/protobuf.git") - SET(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546") + SET(PROTOBUF_TAG "v3.6.1") ExternalProject_Add( ${TARGET_NAME} @@ -231,7 +231,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ) ENDFUNCTION() -SET(PROTOBUF_VERSION 3.1) +SET(PROTOBUF_VERSION 3.6.1) IF(NOT PROTOBUF_FOUND) build_protobuf(extern_protobuf FALSE) diff --git a/python/requirements.txt b/python/requirements.txt index 5a70f1aa3f..6cbda1db54 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,6 +1,6 @@ requests==2.9.2 numpy>=1.12 -protobuf==3.1 +protobuf>=3.6 recordio>=0.1.0 matplotlib==2.2.3 # TODO: let python3 paddlepaddle package use latest matplotlib rarfile From 15da2f9a0d555edbddacb3e5f4c747f1059602df Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Wed, 13 Feb 2019 14:00:31 +0000 Subject: [PATCH 028/117] add embseqpool jitkernel refer code, test and benchmark test=develop --- paddle/fluid/operators/jit/benchmark.cc | 36 ++++++++++ paddle/fluid/operators/jit/helper.cc | 1 + paddle/fluid/operators/jit/helper.h | 9 +++ paddle/fluid/operators/jit/kernel_base.h | 66 +++++++++++++------ paddle/fluid/operators/jit/kernel_key.cc | 5 ++ .../fluid/operators/jit/refer/CMakeLists.txt | 1 + paddle/fluid/operators/jit/refer/refer.cc | 2 + paddle/fluid/operators/jit/refer/refer.h | 34 ++++++++++ paddle/fluid/operators/jit/test.cc | 65 ++++++++++++++++++ 9 files changed, 200 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 97ddf223ae..9831b6ef92 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -301,6 +301,37 @@ void BenchSeqPoolKernel() { } } +template +void BenchEmbSeqPoolKernel() { + std::vector pool_types = {jit::SeqPoolType::kSum}; + int64_t tbl_h = 1e4; + for (int tbl_w : {10, 16, 256}) { + Tensor table; + table.Resize({tbl_h, tbl_w}); + RandomVec(tbl_h * tbl_w, table.mutable_data(PlaceType()), -2.f, 2.f); + const T* table_data = table.data(); + for (auto type : pool_types) { + for (int idx_w : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 10, 16}) { + int64_t out_w = tbl_w * idx_w; + jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, + type); + Tensor idx, out; + idx.Resize({idx_h, idx_w}); + out.Resize({out_w}); + RandomVec(idx_h * idx_w, + idx.mutable_data(PlaceType()), 0, + tbl_h - 1); + const int64_t* idx_data = idx.data(); + T* o_data = out.mutable_data(PlaceType()); + BenchAllImpls, PlaceType>( + attr, table_data, idx_data, o_data, &attr); + } + } + } + } +} + template void BenchMatMulKernel() { for (int m : {1, 2, 3, 4}) { @@ -376,6 +407,11 @@ BENCH_FP32_CPU(kGRUHtPart2) { BenchGRUKernel(); } // seq pool function BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } +// embedding seq pool function +BENCH_FP32_CPU(kEmbSeqPool) { + BenchEmbSeqPoolKernel(); +} + // matmul BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } diff --git a/paddle/fluid/operators/jit/helper.cc b/paddle/fluid/operators/jit/helper.cc index e7292fe2bd..a766536132 100644 --- a/paddle/fluid/operators/jit/helper.cc +++ b/paddle/fluid/operators/jit/helper.cc @@ -54,6 +54,7 @@ const char* to_string(KernelType kt) { ONE_CASE(kHMax); ONE_CASE(kHSum); ONE_CASE(kSoftmax); + ONE_CASE(kEmbSeqPool); default: 
PADDLE_THROW("Not support type: %d, or forget to add it.", kt); return "NOT JITKernel"; diff --git a/paddle/fluid/operators/jit/helper.h b/paddle/fluid/operators/jit/helper.h index d5773d6594..07998588a5 100644 --- a/paddle/fluid/operators/jit/helper.h +++ b/paddle/fluid/operators/jit/helper.h @@ -172,6 +172,15 @@ inline std::ostream& operator<<(std::ostream& os, const seq_pool_attr_t& attr) { return os; } +inline std::ostream& operator<<(std::ostream& os, + const emb_seq_pool_attr_t& attr) { + os << "table_height[" << attr.table_height << "],table_width[" + << attr.table_width << "],index_height[" << attr.index_height + << "],index_width[" << attr.index_width << "],output_width[" + << attr.out_width << "],pool_type[" << to_string(attr.pool_type) << "]"; + return os; +} + inline std::ostream& operator<<(std::ostream& os, const matmul_attr_t& attr) { os << "M[" << attr.m << "],N[" << attr.n << "],K[" << attr.k << "]"; return os; diff --git a/paddle/fluid/operators/jit/kernel_base.h b/paddle/fluid/operators/jit/kernel_base.h index 4a8f61146a..20b6a32bef 100644 --- a/paddle/fluid/operators/jit/kernel_base.h +++ b/paddle/fluid/operators/jit/kernel_base.h @@ -13,6 +13,7 @@ * limitations under the License. */ #pragma once +#include #include "paddle/fluid/operators/jit/macro.h" #include "paddle/fluid/platform/macros.h" @@ -20,34 +21,35 @@ namespace paddle { namespace operators { namespace jit { -// TODO(TJ): reorder by alphabet typedef enum { kNone = 0, - kVMul = 1, - kVAdd = 2, - kVAddRelu, - kVSub, - kVScal, - kVAddBias, - kVRelu, - kVIdentity, - kVSquare, - kVExp, - kVSigmoid, - kVTanh, - kLSTMCtHt, - kLSTMC1H1, + // sort by alphabet + kCRFDecoding = 1, + kEmbSeqPool = 2, kGRUH1, kGRUHtPart1, kGRUHtPart2, - kCRFDecoding, + kHSum, // horizontal max + kHMax, // horizontal sum + kLSTMCtHt, + kLSTMC1H1, kLayerNorm, + kMatMul, kNCHW16CMulNC, kSeqPool, - kMatMul, - kHSum, // horizontal max - kHMax, // horizontal sum kSoftmax, + kVAdd, + kVAddBias, + kVAddRelu, + kVExp, + kVIdentity, + kVMul, + kVRelu, + kVScal, + kVSigmoid, + kVSquare, + kVSub, + kVTanh, } KernelType; typedef enum { @@ -145,6 +147,32 @@ struct SeqPoolTuples { typedef void (*func_type)(const T*, T*, const seq_pool_attr_t*); }; +typedef struct emb_seq_pool_attr_s { + int64_t table_height, table_width; + int64_t index_height, index_width; + int64_t out_width; + SeqPoolType pool_type; + emb_seq_pool_attr_s() = default; + explicit emb_seq_pool_attr_s(int64_t tbl_height, int64_t tbl_width, + int64_t idx_height, int64_t idx_width, + int64_t output_width, + SeqPoolType seqpool_type = SeqPoolType::kSum) + : table_height(tbl_height), + table_width(tbl_width), + index_height(idx_height), + index_width(idx_width), + out_width(output_width), + pool_type(seqpool_type) {} +} emb_seq_pool_attr_t; + +template +struct EmbSeqPoolTuples { + typedef T data_type; + typedef emb_seq_pool_attr_t attr_type; + typedef void (*func_type)(const T*, const int64_t*, T*, + const emb_seq_pool_attr_t*); +}; + typedef struct matmul_attr_s { int m, n, k; void* packed_weight{nullptr}; diff --git a/paddle/fluid/operators/jit/kernel_key.cc b/paddle/fluid/operators/jit/kernel_key.cc index 1e4a8884e7..e659c6d254 100644 --- a/paddle/fluid/operators/jit/kernel_key.cc +++ b/paddle/fluid/operators/jit/kernel_key.cc @@ -56,6 +56,11 @@ size_t JitCodeKey(const matmul_attr_t& attr) { return (key << shift * 2) + ((static_cast(attr.n)) << shift) + attr.k; } +template <> +size_t JitCodeKey(const emb_seq_pool_attr_t& attr) { + return attr.table_width; +} + } // namespace jit } // 
namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/jit/refer/CMakeLists.txt b/paddle/fluid/operators/jit/refer/CMakeLists.txt index 9f2935828c..218d801c08 100644 --- a/paddle/fluid/operators/jit/refer/CMakeLists.txt +++ b/paddle/fluid/operators/jit/refer/CMakeLists.txt @@ -32,3 +32,4 @@ USE_JITKERNEL_REFER(kVSquare) USE_JITKERNEL_REFER(kHSum) USE_JITKERNEL_REFER(kHMax) USE_JITKERNEL_REFER(kSoftmax) +USE_JITKERNEL_REFER(kEmbSeqPool) diff --git a/paddle/fluid/operators/jit/refer/refer.cc b/paddle/fluid/operators/jit/refer/refer.cc index b8adb40ec7..7e7dd6960b 100644 --- a/paddle/fluid/operators/jit/refer/refer.cc +++ b/paddle/fluid/operators/jit/refer/refer.cc @@ -57,4 +57,6 @@ REGISTER_REFER_KERNEL(kHSum, HSum); REGISTER_REFER_KERNEL(kSoftmax, Softmax); +REGISTER_REFER_KERNEL(kEmbSeqPool, EmbSeqPool); + #undef REGISTER_REFER_KERNEL diff --git a/paddle/fluid/operators/jit/refer/refer.h b/paddle/fluid/operators/jit/refer/refer.h index 0c4a985f8e..fd1193aa41 100644 --- a/paddle/fluid/operators/jit/refer/refer.h +++ b/paddle/fluid/operators/jit/refer/refer.h @@ -16,6 +16,7 @@ #include #include +#include #include "paddle/fluid/operators/jit/helper.h" #include "paddle/fluid/operators/jit/kernel_base.h" #include "paddle/fluid/platform/enforce.h" @@ -414,6 +415,37 @@ void Softmax(const T* x, T* y, int n, int bs = 1) { } } +// embedding seq pool +// table is a matrix with (tbl_h, tbl_w) +// idx is a matrix with (idx_h, idx_w) +// output is a vector with length tbl_w * idx_w +template +void EmbSeqPool(const T* table, const int64_t* idx, T* out, + const emb_seq_pool_attr_t* attr) { + PADDLE_ENFORCE_EQ(attr->table_width * attr->index_width, attr->out_width); + + auto check_idx_value_valid = [&](int64_t i) { + PADDLE_ENFORCE_LT(idx[i], attr->table_height, "idx value: %d, i: %d", + idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + + for (int64_t w = 0; w != attr->index_width; ++w) { + check_idx_value_valid(w); + std::memcpy(out + w * attr->table_width, table + idx[w] * attr->table_width, + attr->table_width * sizeof(T)); + } + + for (int64_t h = 1; h < attr->index_height; ++h) { + for (int64_t w = 0; w < attr->index_width; ++w) { + int64_t i = h * attr->index_width + w; + check_idx_value_valid(i); + VAdd(table + idx[i] * attr->table_width, out + w * attr->table_width, + out + w * attr->table_width, attr->table_width); + } + } +} + #define DECLARE_REFER_KERNEL(name, tuples) \ template \ class name##Kernel : public ReferKernel> { \ @@ -462,6 +494,8 @@ DECLARE_REFER_KERNEL(HSum, XRNTuples); DECLARE_REFER_KERNEL(Softmax, SoftmaxTuples); +DECLARE_REFER_KERNEL(EmbSeqPool, EmbSeqPoolTuples); + #undef DECLARE_REFER_KERNEL } // namespace refer diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 237e588d35..c35b6aef23 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -270,6 +270,32 @@ struct TestFuncWithRefer, std::vector, std::vector, } }; +template +struct TestFuncWithRefer, std::vector, + std::vector, std::vector, + typename jit::EmbSeqPoolTuples::attr_type> { + void operator()(const typename jit::EmbSeqPoolTuples::func_type tgt, + const std::vector& table, const std::vector& idx, + const std::vector& oref, + const typename jit::EmbSeqPoolTuples::attr_type& attr) { + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(table.size(), + static_cast(attr.table_height * attr.table_width)); + EXPECT_EQ(idx.size(), + static_cast(attr.index_height * attr.index_width)); + 
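// Shape sketch (following the refer EmbSeqPool kernel above): the output holds one
// block of width table_width per index column, i.e. table_width * index_width values
// in total. Block w is the sum over the rows of idx of table row idx[h][w]; e.g. with
// idx = {{1, 0}, {2, 0}}, block 0 is table_row(1) + table_row(2) and block 1 is
// table_row(0) + table_row(0).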
EXPECT_EQ(oref.size(), + static_cast(attr.table_width * attr.index_width)); + const T* table_data = table.data(); + const int64_t* idx_data = idx.data(); + const T* oref_data = oref.data(); + int o_w = oref.size(); + std::vector out(o_w); + T* o_data = out.data(); + tgt(table_data, idx_data, o_data, &attr); + ExpectEQ(o_data, oref_data, o_w); + } +}; + template struct TestFuncWithRefer, std::vector, std::vector, std::vector, @@ -587,6 +613,40 @@ void TestSoftmaxKernel() { } } +template +void TestEmbSeqPoolKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + int64_t tbl_h = 1e4; + std::vector pool_types = { + jit::SeqPoolType::kSum}; // only support sum yet + for (int tbl_w : TestSizes()) { + std::vector table(tbl_h * tbl_w); + RandomVec(tbl_h * tbl_w, table.data(), -2.f, 2.f); + const T* table_data = table.data(); + for (auto type : pool_types) { + for (int idx_w : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 10, 16}) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector idx(idx_h * idx_w); + RandomVec(idx_h * idx_w, idx.data(), 0, tbl_h - 1); + int64_t out_w = tbl_w * idx_w; + std::vector oref(out_w); + const int64_t* idx_data = idx.data(); + T* o_data = oref.data(); + jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, + type); + ref(table_data, idx_data, o_data, &attr); + + TestAllImpls, PlaceType, std::vector, + std::vector, std::vector>(attr, table, idx, + oref, attr); + } + } + } + } +} + template void TestNCHW16CMulNCKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); @@ -756,6 +816,11 @@ TEST(JITKernel, kSoftmax) { TestSoftmaxKernel(); } +TEST(JITKernel, kEmbSeqPool) { + TestEmbSeqPoolKernel(); + TestEmbSeqPoolKernel(); +} + TEST(JITKernel, kNCHW16CMulNC) { TestNCHW16CMulNCKernel(); TestNCHW16CMulNCKernel(); From a3a3d3d8613c729dccb76aa066948c523c35c7e2 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 14 Feb 2019 14:38:41 +0000 Subject: [PATCH 029/117] add embseqpool jitkernel mkl impl and use it test=develop --- .../fused/fused_embedding_seq_pool_op.h | 41 ++++--------------- .../operators/jit/more/mkl/CMakeLists.txt | 1 + paddle/fluid/operators/jit/more/mkl/mkl.cc | 11 +++++ paddle/fluid/operators/jit/more/mkl/mkl.h | 29 +++++++++++++ 4 files changed, 50 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 744e83541d..92345b3c0e 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -21,6 +21,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { @@ -31,35 +32,6 @@ using LoDTensor = framework::LoDTensor; using SelectedRows = framework::SelectedRows; using DDim = framework::DDim; -template -void emb_seqpool(const framework::ExecutionContext &context, const T *table, - const int64_t *idx, T *out, int64_t table_height, - int64_t table_width, int64_t idx_height, int64_t idx_width, - int64_t out_width) { // pool type == sum - PADDLE_ENFORCE_EQ(table_width * idx_width, out_width); - - auto check_idx_value_valid = [&](int i) { - PADDLE_ENFORCE_LT(idx[i], table_height, "idx value: %d, i: %d", idx[i], i); - PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); - }; - auto blas = math::GetBlas(context); - - for (int w = 0; w != idx_width; ++w) { - check_idx_value_valid(w); - blas.VCOPY(table_width, table + idx[w] * table_width, - out + w * table_width); - } - - for (int h = 1; h < idx_height; ++h) { - for (int w = 0; w < idx_width; ++w) { - int i = h * idx_width + w; - check_idx_value_valid(i); - blas.AXPY(table_width, static_cast(1), table + idx[i] * table_width, - out + w * table_width); - } - } -} - template struct EmbeddingVSumFunctor { void operator()(const framework::ExecutionContext &context, @@ -75,10 +47,15 @@ struct EmbeddingVSumFunctor { auto *output = output_t->mutable_data(context.GetPlace()); PADDLE_ENFORCE_LE(table_width * idx_width, out_width); + + jit::emb_seq_pool_attr_t attr(table_height, table_width, 0, idx_width, + out_width, jit::SeqPoolType::kSum); for (int64_t i = 0; i != ids_lod.size() - 1; ++i) { - emb_seqpool(context, table, ids + ids_lod[i] * idx_width, - output + i * out_width, table_height, table_width, - ids_lod[i + 1] - ids_lod[i], idx_width, out_width); + attr.index_height = ids_lod[i + 1] - ids_lod[i]; + auto emb_seqpool = jit::Get, + platform::CPUPlace>(attr); + emb_seqpool(table, ids + ids_lod[i] * idx_width, output + i * out_width, + &attr); } } }; diff --git a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt index f9e5aea32e..d209f31007 100644 --- a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt @@ -13,3 +13,4 @@ USE_JITKERNEL_MORE(kVSigmoid, mkl) USE_JITKERNEL_MORE(kVTanh, mkl) USE_JITKERNEL_MORE(kSeqPool, mkl) USE_JITKERNEL_MORE(kSoftmax, mkl) +USE_JITKERNEL_MORE(kEmbSeqPool, mkl) diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc index 4c999131ab..29a451f832 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.cc +++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc @@ -174,6 +174,16 @@ bool SeqPoolKernel::UseMe(const seq_pool_attr_t& attr) const { return true; } +template <> +bool EmbSeqPoolKernel::UseMe(const emb_seq_pool_attr_t& attr) const { + return true; +} + +template <> +bool EmbSeqPoolKernel::UseMe(const emb_seq_pool_attr_t& attr) const { + return true; +} + template <> bool MatMulKernel::UseMe(const matmul_attr_t& attr) const { return platform::MayIUse(platform::avx); @@ -227,6 +237,7 @@ REGISTER_MKL_KERNEL(kVSquare, VSquare); REGISTER_MKL_KERNEL(kVSigmoid, VSigmoid); REGISTER_MKL_KERNEL(kVTanh, VTanh); REGISTER_MKL_KERNEL(kSeqPool, SeqPool); +REGISTER_MKL_KERNEL(kEmbSeqPool, EmbSeqPool); REGISTER_MKL_KERNEL(kSoftmax, Softmax); #undef REGISTER_MKL_KERNEL diff --git 
a/paddle/fluid/operators/jit/more/mkl/mkl.h b/paddle/fluid/operators/jit/more/mkl/mkl.h index 8130b87326..9a72ba8302 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.h +++ b/paddle/fluid/operators/jit/more/mkl/mkl.h @@ -18,6 +18,7 @@ #include #include #include "paddle/fluid/operators/jit/kernel_base.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { @@ -91,6 +92,32 @@ void SeqPool(const T* x, T* y, const seq_pool_attr_t* attr) { } } +template +void EmbSeqPool(const T* table, const int64_t* idx, T* out, + const emb_seq_pool_attr_t* attr) { + PADDLE_ENFORCE_EQ(attr->table_width * attr->index_width, attr->out_width); + auto check_idx_value_valid = [&](int64_t i) { + PADDLE_ENFORCE_LT(idx[i], attr->table_height, "idx value: %d, i: %d", + idx[i], i); + PADDLE_ENFORCE_GE(idx[i], 0, "idx value: %d, i: %d", idx[i], i); + }; + + for (int64_t w = 0; w != attr->index_width; ++w) { + check_idx_value_valid(w); + VCopy(table + idx[w] * attr->table_width, out + w * attr->table_width, + attr->table_width); + } + + for (int64_t h = 1; h < attr->index_height; ++h) { + for (int64_t w = 0; w < attr->index_width; ++w) { + int64_t i = h * attr->index_width + w; + check_idx_value_valid(i); + VAXPY(static_cast(1), table + idx[i] * attr->table_width, + out + w * attr->table_width, attr->table_width); + } + } +} + template void ASum(const T* x, T* res, int n); @@ -142,6 +169,8 @@ DECLARE_MKL_KERNEL(VSquare, XYNTuples); DECLARE_MKL_KERNEL(SeqPool, SeqPoolTuples); +DECLARE_MKL_KERNEL(EmbSeqPool, EmbSeqPoolTuples); + DECLARE_MKL_KERNEL(Softmax, SoftmaxTuples); #undef DECLARE_MKL_KERNEL From b6085526f34db0bb447c8b43c6b04ab49ac7bdfa Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 15 Feb 2019 08:07:04 +0000 Subject: [PATCH 030/117] test=develop, update protobuf in Dockerfile used by CI --- tools/manylinux1/Dockerfile.x64 | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 index 48fd145e5f..c2fd743f62 100644 --- a/tools/manylinux1/Dockerfile.x64 +++ b/tools/manylinux1/Dockerfile.x64 @@ -31,10 +31,10 @@ RUN wget --no-check-certificate -qO- https://storage.googleapis.com/golang/go1.8 ENV GOROOT=/usr/local/go GOPATH=/root/gopath ENV PATH=${GOROOT}/bin:${GOPATH}/bin:${PATH} -# protobuf 3.1.0 -RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf/releases/download/v3.1.0/protobuf-cpp-3.1.0.tar.gz && \ - tar xzf protobuf-cpp-3.1.0.tar.gz && \ - cd protobuf-3.1.0 && ./configure && make -j4 && make install && cd .. && rm -f protobuf-cpp-3.1.0.tar.gz +# protobuf 3.6.1 +RUN cd /opt && wget -q --no-check-certificate https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz && \ + tar xzf protobuf-cpp-3.6.1.tar.gz && \ + cd protobuf-3.6.1 && ./configure && make -j4 && make install && cd .. 
&& rm -f protobuf-cpp-3.6.1.tar.gz RUN wget https://raw.githubusercontent.com/PaddlePaddle/Paddle/develop/python/requirements.txt -O /root/requirements.txt From 48cf979a2138a3267224a1d86c65cd1db62068c3 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 15 Feb 2019 09:49:58 +0000 Subject: [PATCH 031/117] test=develop, install requirements before start for Linux --- cmake/external/python.cmake | 4 ++-- paddle/scripts/paddle_build.sh | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 623c53f4f7..351e7fa3ce 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -74,8 +74,8 @@ IF(PYTHONINTERP_FOUND) find_python_module(wheel REQUIRED) find_python_module(google.protobuf REQUIRED) FIND_PACKAGE(NumPy REQUIRED) - IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.0.0") - MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.0.0, " + IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.6.1") + MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.6.1, " "please use pip to upgrade protobuf. pip install -U protobuf") ENDIF() ENDIF(PYTHONINTERP_FOUND) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 1135caf4f8..bb24ada223 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -128,30 +128,35 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp27-cp27mu" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp35-cp35m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.5.1/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.5.1/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.5.1/include/python3.5m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.5.1/lib/libpython3.so" + pip3.5 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp36-cp36m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.6.0/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.6.0/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.6.0/include/python3.6m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.6.0/lib/libpython3.so" + pip3.6 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp37-cp37m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} export PATH=/opt/_internal/cpython-3.7.0/bin/:${PATH} export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m 
-DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.so" + pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi fi fi From e5d3d7c63d6c536b72210a4e4d1e3ae437d4c1cb Mon Sep 17 00:00:00 2001 From: "Zhang, Guoming" Date: Sat, 16 Feb 2019 00:07:37 +0800 Subject: [PATCH 032/117] resolve #15724 1.Remove the code for setting mkldnn environment in the test_calibration.py; 2.Update the cmake file for MKLDNN environment enabling; 3.Update the INT8 inference doc. test=develop --- python/paddle/fluid/contrib/int8_inference/README.md | 4 ++-- python/paddle/fluid/contrib/tests/CMakeLists.txt | 6 +++++- python/paddle/fluid/contrib/tests/test_calibration.py | 4 ---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/python/paddle/fluid/contrib/int8_inference/README.md b/python/paddle/fluid/contrib/int8_inference/README.md index a9691dad44..460ae393f1 100644 --- a/python/paddle/fluid/contrib/int8_inference/README.md +++ b/python/paddle/fluid/contrib/int8_inference/README.md @@ -63,10 +63,10 @@ Notes: ## 4. How to reproduce the results * Small dataset ```bash -python python/paddle/fluid/contrib/tests/test_calibration.py +FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration.py ``` * Full dataset ```bash -DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py +FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py ``` diff --git a/python/paddle/fluid/contrib/tests/CMakeLists.txt b/python/paddle/fluid/contrib/tests/CMakeLists.txt index 81aee1233d..a2c5941646 100644 --- a/python/paddle/fluid/contrib/tests/CMakeLists.txt +++ b/python/paddle/fluid/contrib/tests/CMakeLists.txt @@ -6,5 +6,9 @@ if(APPLE OR WIN32 OR NOT WITH_MKL) endif() foreach(src ${TEST_OPS}) - py_test(${src} SRCS ${src}.py) + if(src MATCHES "test_calibration") + py_test(${src} SRCS ${src}.py ENVS FLAGS_use_mkldnn=true) + else() + py_test(${src} SRCS ${src}.py) + endif() endforeach() diff --git a/python/paddle/fluid/contrib/tests/test_calibration.py b/python/paddle/fluid/contrib/tests/test_calibration.py index 424ea245a0..b9f938bebe 100644 --- a/python/paddle/fluid/contrib/tests/test_calibration.py +++ b/python/paddle/fluid/contrib/tests/test_calibration.py @@ -199,7 +199,6 @@ class TestCalibrationForResnet50(unittest.TestCase): def run_program(self, model_path, generate_int8=False, algo='direct'): image_shape = [3, 224, 224] - os.environ['FLAGS_use_mkldnn'] = 'True' fluid.memory_optimize(fluid.default_main_program()) @@ -241,9 +240,6 @@ class TestCalibrationForResnet50(unittest.TestCase): label = label.reshape([-1, 1]) running_program = calibrator.sampling_program.clone( ) if generate_int8 else infer_program.clone() - for op in running_program.current_block().ops: - if op.has_attr("use_mkldnn"): - op._set_attr("use_mkldnn", True) t1 = time.time() _, acc1, _ = exe.run( From 1e46ab2e3ebbee882aa229dd0a8793415e18f3f3 Mon Sep 17 00:00:00 2001 From: chengduozh Date: Fri, 15 Feb 2019 18:57:21 +0800 Subject: [PATCH 033/117] follow comment test=develop --- python/paddle/fluid/layers/nn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index f4c4fc3b65..3183a49794 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5938,7 +5938,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): inplace(bool): If ``inplace`` is `True`, the input and output of ``layers.reshape`` are the same 
variable, otherwise, the input and output of ``layers.reshape`` are different variables. Note that if :attr:`x` - is more than one layers' input, ``inplace`` must be :attr:`False`. + is more than one layer's input, ``inplace`` must be :attr:`False`. name (str): The name of this layer. It is optional. Returns: From d376cf71b743b65dd4fc21edd3a634f69148a3eb Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:13:16 +0800 Subject: [PATCH 034/117] polish code for reading. test=develop --- .../fluid/framework/details/build_strategy.cc | 2 + .../details/memory_optimize_helper.cc | 15 ++++-- .../details/memory_optimize_helper.h | 1 + .../details/memory_optimize_helper_test.cc | 46 +++++++++++++++++++ .../framework/details/memory_optimize_pass.cc | 38 ++++++++------- .../unittests/parallel_executor_test_base.py | 2 +- .../test_ir_memory_optimize_transformer.py | 46 +++++++++++++++++++ 7 files changed, 128 insertions(+), 22 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index f8030c53f7..0c823b9ca2 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -240,7 +240,9 @@ std::unique_ptr BuildStrategy::Apply( continue; } } + VLOG(3) << "Start Apply Pass " << pass->Type(); graph = pass->Apply(std::move(graph)); + VLOG(3) << "Finish Apply Pass " << pass->Type(); } return graph; } diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index ef2b4131bf..33c2186067 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -268,10 +268,15 @@ bool OrderedSet::Has(ir::Node* var) const { return false; } +void OrderedSet::Erase(const std::string& var) { + PADDLE_ENFORCE(mark_table_.count(var)); + nodes_.erase(mark_table_[var]); + mark_table_.erase(var); +} + void OrderedSet::Erase(ir::Node* var) { - PADDLE_ENFORCE(mark_table_.count(var->Name())); - nodes_.erase(mark_table_[var->Name()]); - mark_table_.erase(var->Name()); + PADDLE_ENFORCE(var != nullptr); + Erase(var->Name()); } std::string OrderedSet::ToString() const { @@ -509,7 +514,9 @@ ir::Node* ControlFlowGraph::GetNodeByName(const std::string& name, for (auto* node : ops_) { if (node == op) break; for (auto& output : node->outputs) { - if (output->Name() == name) { + PADDLE_ENFORCE((output != nullptr && output->IsVar()), + "Output is empty!"); + if (output->Var() && output->Name() == name) { found_node = output; } } diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index e17030b2ab..dba96309fd 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -55,6 +55,7 @@ class OrderedSet { void Insert(ir::Node* var); void Erase(ir::Node* var); + void Erase(const std::string& var); bool Has(ir::Node* var) const; void Clear() { mark_table_.clear(); diff --git a/paddle/fluid/framework/details/memory_optimize_helper_test.cc b/paddle/fluid/framework/details/memory_optimize_helper_test.cc index 5c13dda9e5..3cfe297a73 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper_test.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper_test.cc @@ -107,6 +107,52 @@ TEST(OrderedSet, Normal) { 
ASSERT_EQ(pool.GetNodeIndexInPool(cache), 5); // match 4:[5,2] } } + +TEST(OrderedSet, FindBestFitNode) { + OrderedSet pool; + std::vector> nodes; + ProgramDesc prog; + BlockDesc* block_desc = prog.MutableBlock(0); + auto* op_desc = block_desc->AppendOp(); + op_desc->SetType("dummy"); + std::unique_ptr op = ir::CreateNodeForTest(op_desc); + + { + auto desc = block_desc->Var("a"); + desc->SetShape({128, 128}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + { + auto desc = block_desc->Var("b"); + desc->SetShape({128, 129}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + { + auto desc = block_desc->Var("c"); + desc->SetShape({128, 128}); + std::unique_ptr node = ir::CreateNodeForTest(desc); + node->inputs.emplace_back(op.get()); + nodes.emplace_back(std::move(node)); + } + + for (auto& node : nodes) { + pool.Insert(node.get()); + } + + // FindNextBestFitNode + auto* n = nodes[0].get(); + auto* cache = pool.FindBestFitNode(n); + PADDLE_ENFORCE(cache->Name() == "a"); + cache = pool.FindNextBestFitNode(n, cache); + PADDLE_ENFORCE(cache->Name() == "c"); + cache = pool.FindNextBestFitNode(n, cache); + PADDLE_ENFORCE(cache->Name() == "b"); +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 2f9e2e662b..c426059a6a 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -69,7 +69,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } for (auto& var : op->outputs) { - if (skip_set_.count(var->Name())) { + if (var->IsVar() && !var->IsCtrlVar() && skip_set_.count(var->Name())) { VLOG(3) << "Skip set contains variable of " << var->Name() << "disable reuse on it. skipped"; continue; @@ -77,8 +77,8 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( if (NodeCanReused(var) && cfg_->Use(op).count(var->Name()) == 0) { ir::Node* cache = pool_.FindBestFitNode(var); while (cache != nullptr && var->Name() == cache->Name()) { - VLOG(3) << "The same cache variable is cascade reused." << var->Name() - << " is re-filled to the pool after" + VLOG(3) << "The same cache variable is cascade reused. " + << var->Name() << " is re-filled to the pool after" << "the reused op is finished. Current op can not " << "replace it again. Skip this candidate."; cache = pool_.FindNextBestFitNode(var, cache); @@ -107,11 +107,13 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( // // CFG Graph store the liveness information, when reuse happens // we also need to update the variable liveness. 
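// The var/cache names are copied into local std::strings before the renames below,
// presumably because RenameVarInGraphNode can remove the original ir::Node objects
// from the graph, after which calling var->Name() or cache->Name() would not be safe.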
- cfg_->RenameVarInCFGGraph(var->Name(), cache->Name(), idx); - RenameVarInGraphDesc(var->Name(), cache->Name(), idx); - RenameVarInGraphNode(var->Name(), cache->Name(), idx, graph.get()); + const std::string var_name = var->Name(); + const std::string cache_name = cache->Name(); - pool_.Erase(cache); + cfg_->RenameVarInCFGGraph(var_name, cache_name, idx); + RenameVarInGraphDesc(var_name, cache_name, idx); + RenameVarInGraphNode(var_name, cache_name, idx, graph.get()); + pool_.Erase(cache_name); } } } @@ -119,7 +121,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( for (auto var : cfg_->LiveIn(op)) { if (cfg_->LiveOut(op).count(var) == 0) { ir::Node* var_node = cfg_->GetNodeByName(var, op); - if (var_node == nullptr) continue; + if (var_node == nullptr || var_node->IsCtrlVar()) continue; if (NodeCanReused(var_node) && !pool_.Has(var_node)) { pool_.Insert(var_node); } @@ -275,8 +277,7 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, // redirect the input to the latest version of cache_var for (auto* node : op->inputs) { if (node->Name() == var) { - ir::Node* cache_node = graph->CreateVarNode(var_desc.get()); - var_nodes_[cache_var].emplace_back(cache_node); + ir::Node* cache_node = var_nodes_[cache_var].back(); // swap node to cache_node cache_node->outputs.insert(cache_node->outputs.end(), @@ -285,11 +286,15 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, auto* prev_op = node->inputs[0]; std::replace(prev_op->outputs.begin(), prev_op->outputs.end(), node, cache_node); - cache_node->inputs.emplace_back(prev_op); for (auto* next_op : node->outputs) { std::replace(next_op->inputs.begin(), next_op->inputs.end(), node, cache_node); } + + // erase unused node + auto& nodes = var_nodes_.at(var); + nodes.erase(std::remove(nodes.begin(), nodes.end(), node), nodes.end()); + graph->RemoveNode(node); } } @@ -309,15 +314,14 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, std::replace(next_op->inputs.begin(), next_op->inputs.end(), node, cache_node); } + + // erase unused node + auto& nodes = var_nodes_.at(var); + nodes.erase(std::remove(nodes.begin(), nodes.end(), node), nodes.end()); + graph->RemoveNode(node); } } } - - // release node of unused var in graph - for (auto* node : var_nodes_[var]) { - graph->RemoveNode(node); - } - var_nodes_.at(var).clear(); } } // namespace details diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index c429c8af7d..a94487e67d 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -79,7 +79,7 @@ class TestParallelExecutorBase(unittest.TestCase): if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops build_strategy.fuse_relu_depthwise_conv = fuse_relu_depthwise_conv - build_strategy.memory_optimize = use_ir_memory_optimize + build_strategy.memory_optimize = False if memory_opt else use_ir_memory_optimize # python memory optimization is conflict with inplace pass. # Use ir graph memory optimization after inplace pass is the correct way. 
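# When the python transpiler optimization (memory_opt) is enabled, both the IR
# memory_optimize pass and the inplace pass are forced off so the two mechanisms
# do not try to reuse the same variables.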
build_strategy.enable_inplace = False if memory_opt else enable_inplace diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py new file mode 100644 index 0000000000..d34ce44d7c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest +os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" +os.environ['FLAGS_fast_eager_deletion_mode'] = True + +os.environ[ + 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' + +from test_parallel_executor_transformer import TestTransformer + + +# NOTE(dzhwinter): test diferent strategy colisions. +# open the eager delete tensor strategy by default. +class TestTransformerWithIR(TestTransformer): + def test_main(self): + if core.is_compiled_with_cuda(): + # check python transpiler + self.check_network_convergence( + transformer, + use_cuda=True, + memory_opt=True, + use_ir_memory_optimize=False) + # check IR memory optimize + self.check_network_convergence( + transformer, + use_cuda=True, + memory_opt=False, + use_ir_memory_optimize=True) + + +if __name__ == '__main__': + unittest.main() From d0a2a202d03d79daad60ac82dde5de74f72368f1 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:33:50 +0800 Subject: [PATCH 035/117] polish code for reading. test=develop --- .../tests/unittests/test_ir_memory_optimize_transformer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py index d34ce44d7c..f32e1161ad 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -14,9 +14,10 @@ import os import unittest -os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" -os.environ['FLAGS_fast_eager_deletion_mode'] = True +import paddle.fluid as fluid +import paddle.fluid.core as core +os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" os.environ[ 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' From 6deac40724995e04039f1fda19b7ea037bf1597c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 00:41:26 +0800 Subject: [PATCH 036/117] polish code for reading. 
test=develop --- .../fluid/tests/unittests/test_ir_memory_optimize_transformer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py index f32e1161ad..c0f480e34d 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_transformer.py @@ -22,6 +22,7 @@ os.environ[ 'RECORDIO_FILENAME'] = '/tmp/ir_memory_optimize_transformer.wmt16.recordio' from test_parallel_executor_transformer import TestTransformer +from test_parallel_executor_transformer import transformer # NOTE(dzhwinter): test diferent strategy colisions. From 3787e61fcaada5f5ac36fe17bf504cbda1cdfa0b Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 09:34:55 +0800 Subject: [PATCH 037/117] polish code for reading. test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 6b1957ae59..dc308fd259 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -53,7 +53,7 @@ cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base s if(WITH_GPU) cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) else() -nv_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) endif() cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) From 684b572307ccbcbc038c175fda038ab5607c6c1f Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 11:14:42 +0800 Subject: [PATCH 038/117] polish code for reading. 
test=develop --- .../details/memory_optimize_helper.cc | 5 +++ .../framework/inplace_op_inference_test.cc | 32 +++++++++---------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 33c2186067..6126c168cc 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -172,6 +172,11 @@ struct NodeComparator { bool operator()(ir::Node* lhs, ir::Node* rhs) const { auto* lhs_desc = FindVarDescInBlock(lhs); auto* rhs_desc = FindVarDescInBlock(rhs); + // match data type + if (lhs_desc->GetDataType() != rhs_desc->GetDataType()) { + return false; + } + // match shape auto lhs_shape = lhs_desc->GetShape(); auto rhs_shape = rhs_desc->GetShape(); if ((lhs_shape[0] == -1 && rhs_shape[0] == -1) || diff --git a/paddle/fluid/framework/inplace_op_inference_test.cc b/paddle/fluid/framework/inplace_op_inference_test.cc index 3e4d715c6f..bf9d1dcd38 100644 --- a/paddle/fluid/framework/inplace_op_inference_test.cc +++ b/paddle/fluid/framework/inplace_op_inference_test.cc @@ -179,11 +179,11 @@ TEST(InferInplace, SingleOpInplaceInToOut) { op->SetOutput("Out", {"test2_out"}); prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64}); + prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128}); prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 128, 128}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -201,11 +201,11 @@ TEST(InferInplace, SingleGradOpInplaceInToOut) { op->SetOutput(GradVarName("X"), {"test2_a", "test2_b", "test2_c"}); prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16, 1024, 1024}); prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -233,12 +233,12 @@ TEST(InferInplace, MultiOutInplaceInToOut) { prog.MutableBlock(0)->Var("o0"); prog.MutableBlock(0)->Var("y0"); prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 
1024}); + prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); @@ -267,12 +267,12 @@ TEST(InferInplace, MultiGradInplaceInToOut) { prog.MutableBlock(0)->Var("o0"); prog.MutableBlock(0)->Var("y0"); prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16}); + prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); + prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; auto in_to_outs = infer_inplace(*op, op->Block()); From c2a5d97172ddff73fa1f634ecaf733ee89a7c63e Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 03:20:55 +0000 Subject: [PATCH 039/117] test=develop, uninstall protobuf on linux brefore install latest version of it --- paddle/scripts/paddle_build.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index bb24ada223..dbae55db56 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -88,6 +88,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.5/include/python3.5m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.5/lib/libpython3.5m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.5 uninstall -y protobuf pip3.5 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -101,6 +102,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.6 uninstall -y protobuf pip3.6 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -114,6 +116,7 @@ function cmake_gen() { -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib" WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} + pip3.7 uninstall -y protobuf pip3.7 install --user -r ${PADDLE_ROOT}/python/requirements.txt else exit 1 @@ -128,6 +131,7 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + pip uninstall -y protobuf pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp27-cp27mu" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} @@ -135,6 +139,7 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python 
-DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + pip uninstall -y protobuf pip install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp35-cp35m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} @@ -142,6 +147,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.5.1/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.5.1/include/python3.5m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.5.1/lib/libpython3.so" + pip3.5 uninstall -y protobuf pip3.5 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp36-cp36m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.6.0/lib/:${LD_LIBRARY_PATH} @@ -149,6 +155,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.6.0/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.6.0/include/python3.6m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.6.0/lib/libpython3.so" + pip3.6 uninstall -y protobuf pip3.6 install -r ${PADDLE_ROOT}/python/requirements.txt elif [ "$1" == "cp37-cp37m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-3.7.0/lib/:${LD_LIBRARY_PATH} @@ -156,6 +163,7 @@ function cmake_gen() { export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/_internal/cpython-3.7.0/bin/python3.7 -DPYTHON_INCLUDE_DIR:PATH=/opt/_internal/cpython-3.7.0/include/python3.7m -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-3.7.0/lib/libpython3.so" + pip3.7 uninstall -y protobuf pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi fi From 077d12b93951d48117011472ea1917e4760f14ef Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Mon, 18 Feb 2019 11:31:26 +0800 Subject: [PATCH 040/117] fix scale cleaner (#15742) --- .../fluid/framework/ir/identity_scale_op_clean_pass.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc index 3b738aa159..5bdc0c5fae 100644 --- a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc +++ b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc @@ -38,9 +38,13 @@ std::unique_ptr IdentityScaleOpCleanPass::ApplyImpl( ->assert_is_op("scale") ->assert_op_attr("scale", 1.) ->assert_op_attr("bias", 0.); - auto scale_out = detector.mutable_pattern() - ->NewNode("scale_out") - ->assert_is_op_output("scale"); + auto scale_out = + detector.mutable_pattern() + ->NewNode("scale_out") + ->assert_is_op_output("scale") + // scale's output var should has only one consumer, or it can't be + // removed. + ->assert_more([](Node* x) { return x->outputs.size() == 1UL; }); pre_op->LinksTo({scale_in}); scale_op->LinksFrom({scale_in}).LinksTo({scale_out}); From 18afb77e78bae25ed1d0ac768b37ff229cecef3c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 12:12:21 +0800 Subject: [PATCH 041/117] polish code for reading. 
test=develop --- .../framework/details/memory_optimize_pass.cc | 28 ++++++++++++++++++- .../framework/details/memory_optimize_pass.h | 1 + .../test_fuse_elewise_add_act_pass.py | 4 +++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index c426059a6a..fabcd2ecd2 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -128,7 +128,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } } } - graph->ResolveHazard(var_nodes_); + // graph->ResolveHazard(var_nodes_); return graph; } @@ -324,6 +324,32 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, } } +void MemoryOptimizePass::ClearControlDepVars(ir::Graph* graph) const { + for (auto& op : graph->Nodes()) { + if (!op->IsOp()) continue; + { + auto& nodes = op->inputs; + nodes.erase( + std::remove_if(nodes.begin(), nodes.end(), + [&](ir::Node* var) { return var->IsCtrlVar(); }), + nodes.end()); + } + { + auto& nodes = op->outputs; + nodes.erase( + std::remove_if(nodes.begin(), nodes.end(), + [&](ir::Node* var) { return var->IsCtrlVar(); }), + nodes.end()); + } + } + + for (auto& node : graph->Nodes()) { + if (node->IsCtrlVar()) { + graph->RemoveNode(node); + } + } +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.h b/paddle/fluid/framework/details/memory_optimize_pass.h index 593ffc10fc..f5d188101f 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.h +++ b/paddle/fluid/framework/details/memory_optimize_pass.h @@ -48,6 +48,7 @@ class MemoryOptimizePass : public ir::Pass { void RenameVarInGraphNode(const std::string& var, const std::string& cache_var, size_t idx, ir::Graph* graph) const; + void ClearControlDepVars(ir::Graph* graph) const; void SubGraphOptimize(OpDesc* op_desc) const; // 1. scan op with subblock and collect the output/input vars. diff --git a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py index 03471a4432..c1fb53ecf5 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_elewise_add_act_pass.py @@ -121,6 +121,8 @@ class TestMNIST(TestParallelExecutorBase): regularization=fluid.regularizer.L2Decay(1e-6)) return optimizer + # NOTE(dzh): + # need to make it compatible with elewise fuse act not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence( model, feed_dict={"image": img, @@ -128,6 +130,7 @@ class TestMNIST(TestParallelExecutorBase): use_cuda=use_cuda, fuse_elewise_add_act_ops=False, memory_opt=False, + use_ir_memory_optimize=False, optimizer=_optimizer) fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence( model, @@ -136,6 +139,7 @@ class TestMNIST(TestParallelExecutorBase): use_cuda=use_cuda, fuse_elewise_add_act_ops=True, memory_opt=False, + use_ir_memory_optimize=False, optimizer=_optimizer) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): From 591ad33e32a3528b9def15ef8c707b6a2be10334 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 12:14:09 +0800 Subject: [PATCH 042/117] polish code for reading. 
test=develop --- paddle/fluid/framework/details/memory_optimize_pass.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index fabcd2ecd2..aa6641d3f2 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -46,6 +46,7 @@ namespace details { std::unique_ptr MemoryOptimizePass::ApplyImpl( std::unique_ptr graph) const { auto nodes = graph->Nodes(); + ClearControlDepVars(graph.get()); CollectSkipVarsSet(nodes); cfg_.reset(new details::ControlFlowGraph(*graph)); @@ -128,7 +129,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( } } } - // graph->ResolveHazard(var_nodes_); + graph->ResolveHazard(var_nodes_); return graph; } From 576e7d71f8a39d03c0ff3453105c8547d3d6586c Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 05:22:48 +0000 Subject: [PATCH 043/117] test=develop, fix pip --- paddle/scripts/paddle_build.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index dbae55db56..5ef3a31024 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -125,6 +125,8 @@ function cmake_gen() { else if [ "$1" != "" ]; then echo "using python abi: $1" + pip uninstall -y protobuf + pip install -r ${PADDLE_ROOT}/python/requirements.txt if [ "$1" == "cp27-cp27m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} export PATH=/opt/python/cp27-cp27m/bin/:${PATH} From d386a71b65d44587892b3b0110cd1c6625f1592e Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 06:15:25 +0000 Subject: [PATCH 044/117] test=develop, install protobuf in linux --- paddle/scripts/paddle_build.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 5ef3a31024..e7078499ca 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -125,8 +125,6 @@ function cmake_gen() { else if [ "$1" != "" ]; then echo "using python abi: $1" - pip uninstall -y protobuf - pip install -r ${PADDLE_ROOT}/python/requirements.txt if [ "$1" == "cp27-cp27m" ]; then export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} export PATH=/opt/python/cp27-cp27m/bin/:${PATH} @@ -168,6 +166,9 @@ function cmake_gen() { pip3.7 uninstall -y protobuf pip3.7 install -r ${PADDLE_ROOT}/python/requirements.txt fi + else + pip uninstall -y protobuf + pip install -r ${PADDLE_ROOT}/python/requirements.txt fi fi From d94a314db55e82e7cef707d016a2796f0b6cc2bb Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 14:37:53 +0800 Subject: [PATCH 045/117] add reference. 
test=develop --- .../framework/details/memory_optimize_pass.cc | 29 +------------------ .../framework/details/memory_optimize_pass.h | 1 - 2 files changed, 1 insertion(+), 29 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index aa6641d3f2..b35b967c72 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -46,7 +46,6 @@ namespace details { std::unique_ptr MemoryOptimizePass::ApplyImpl( std::unique_ptr graph) const { auto nodes = graph->Nodes(); - ClearControlDepVars(graph.get()); CollectSkipVarsSet(nodes); cfg_.reset(new details::ControlFlowGraph(*graph)); @@ -79,7 +78,7 @@ std::unique_ptr MemoryOptimizePass::ApplyImpl( ir::Node* cache = pool_.FindBestFitNode(var); while (cache != nullptr && var->Name() == cache->Name()) { VLOG(3) << "The same cache variable is cascade reused. " - << var->Name() << " is re-filled to the pool after" + << cache->Name() << " is re-filled to the pool after " << "the reused op is finished. Current op can not " << "replace it again. Skip this candidate."; cache = pool_.FindNextBestFitNode(var, cache); @@ -325,32 +324,6 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, } } -void MemoryOptimizePass::ClearControlDepVars(ir::Graph* graph) const { - for (auto& op : graph->Nodes()) { - if (!op->IsOp()) continue; - { - auto& nodes = op->inputs; - nodes.erase( - std::remove_if(nodes.begin(), nodes.end(), - [&](ir::Node* var) { return var->IsCtrlVar(); }), - nodes.end()); - } - { - auto& nodes = op->outputs; - nodes.erase( - std::remove_if(nodes.begin(), nodes.end(), - [&](ir::Node* var) { return var->IsCtrlVar(); }), - nodes.end()); - } - } - - for (auto& node : graph->Nodes()) { - if (node->IsCtrlVar()) { - graph->RemoveNode(node); - } - } -} - } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.h b/paddle/fluid/framework/details/memory_optimize_pass.h index f5d188101f..593ffc10fc 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.h +++ b/paddle/fluid/framework/details/memory_optimize_pass.h @@ -48,7 +48,6 @@ class MemoryOptimizePass : public ir::Pass { void RenameVarInGraphNode(const std::string& var, const std::string& cache_var, size_t idx, ir::Graph* graph) const; - void ClearControlDepVars(ir::Graph* graph) const; void SubGraphOptimize(OpDesc* op_desc) const; // 1. scan op with subblock and collect the output/input vars. 
From 642fd68ce0e4c71e0a5e9fd4417769a9e98ee8b7 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 18 Feb 2019 14:44:25 +0800 Subject: [PATCH 046/117] update by comment test=develop --- .../framework/details/all_reduce_deps_pass.h | 2 -- .../details/memory_optimize_helper.h | 2 -- .../details/multi_devices_graph_pass.cc | 1 - .../details/parallel_ssa_graph_executor.cc | 28 +++++++++++++------ .../details/parallel_ssa_graph_executor.h | 11 ++++---- paddle/fluid/framework/ir/graph.h | 5 ++++ paddle/fluid/framework/parallel_executor.cc | 18 +++--------- 7 files changed, 35 insertions(+), 32 deletions(-) diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.h b/paddle/fluid/framework/details/all_reduce_deps_pass.h index 1637c7a7a6..e8b9108981 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.h +++ b/paddle/fluid/framework/details/all_reduce_deps_pass.h @@ -21,8 +21,6 @@ namespace paddle { namespace framework { namespace details { -constexpr char kAllOpDescs[] = "all_op_descs"; - // TODO(gongwb): overlap allreduce with backward computation. class AllReduceDepsPass : public ir::Pass { protected: diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/details/memory_optimize_helper.h index 0bfaf827fe..2c9a16d445 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/details/memory_optimize_helper.h @@ -29,8 +29,6 @@ namespace paddle { namespace framework { namespace details { -constexpr char kAllOpDescs[] = "all_op_descs"; - std::vector SortOpLikeDescOrder(const ir::Graph& graph); // NOTE(dzh): A ordered set for node reuse in memory optimize. diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 4f856c6d9e..27bc771814 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -221,7 +221,6 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( * Only variables should be the leaves of graph. 
*/ AddOutputToLeafOps(&result); - // result.Erase(kGraphOps); return graph; } diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index 3433c3424e..2cafa1873a 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -19,12 +19,12 @@ namespace paddle { namespace framework { namespace details { -std::vector> SeparateMultiDevicesGraph( - const std::vector &places, - std::unique_ptr graph) { +std::vector> +ParallelSSAGraphExecutor::SeparateMultiDevicesGraph( + std::unique_ptr &&graph) { std::vector> graphs; - graphs.reserve(places.size()); - for (size_t i = 0; i < places.size(); ++i) { + graphs.reserve(places_.size()); + for (size_t i = 0; i < places_.size(); ++i) { ProgramDesc empty; graphs.emplace_back(std::unique_ptr(new ir::Graph(empty))); auto &g = graphs.back(); @@ -60,7 +60,7 @@ std::vector> SeparateMultiDevicesGraph( } } - for (size_t dev_id = 0; dev_id < places.size(); ++dev_id) { + for (size_t dev_id = 0; dev_id < places_.size(); ++dev_id) { auto &dev_vars = graphs[dev_id]->Get(kGraphVars)[0]; auto &origin_vars = graph->Get(kGraphVars)[dev_id]; for (auto &name_pair : origin_vars) { @@ -80,14 +80,26 @@ std::vector> SeparateMultiDevicesGraph( ParallelSSAGraphExecutor::ParallelSSAGraphExecutor( const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, - std::vector> &&graphs) + const framework::ProgramDesc &main_prog, std::unique_ptr &&graph) : strategy_(std::move(strategy)), local_scopes_(std::move(local_scopes)), pool_(places.size() >= 2 ? new ::ThreadPool(places.size()) : nullptr), places_(std::move(places)), - graphs_(std::move(graphs)) { + main_prog_(main_prog), + // TODO(Yancey1989): copy graphs is not safely since it deleted the attrs. + graphs_(SeparateMultiDevicesGraph(std::move(graph))) { PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size()); + auto seq_allreduce_pass = + ir::PassRegistry::Instance().Get("all_reduce_deps_pass"); + seq_allreduce_pass->Erase(details::kAllOpDescs); + seq_allreduce_pass->Set>( + details::kAllOpDescs, + new std::vector(main_prog_.Block(0).AllOps())); + for (size_t i = 0; i < graphs_.size(); ++i) { + graphs_[i] = seq_allreduce_pass->Apply(std::move(graphs_[i])); + } + // set the correct size of thread pool to each device. strategy_.num_threads_ = strategy_.num_threads_ < places_.size() ? 
1UL diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h index c31bba17f6..f59305bf98 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h @@ -28,16 +28,13 @@ namespace paddle { namespace framework { namespace details { -std::vector> SeparateMultiDevicesGraph( - const std::vector &places, - std::unique_ptr graph); - class ParallelSSAGraphExecutor : public SSAGraphExecutor { public: ParallelSSAGraphExecutor(const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, - std::vector> &&graphs); + const framework::ProgramDesc &main_prog, + std::unique_ptr &&graph); ~ParallelSSAGraphExecutor() final = default; const ir::Graph &Graph() const override { return *graphs_[0]; } @@ -45,10 +42,14 @@ class ParallelSSAGraphExecutor : public SSAGraphExecutor { FeedFetchList Run(const std::vector &fetch_tensors) override; private: + std::vector> SeparateMultiDevicesGraph( + std::unique_ptr &&graph); + ExecutionStrategy strategy_; std::vector local_scopes_; std::unique_ptr<::ThreadPool> pool_{nullptr}; std::vector places_; + framework::ProgramDesc main_prog_; std::vector> graphs_; std::vector> executors_; diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index b55a774513..d5b3782f62 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -26,6 +26,11 @@ limitations under the License. */ namespace paddle { namespace framework { + +namespace details { +constexpr char kAllOpDescs[] = "all_op_descs"; +} // namespace details + namespace ir { /* diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index dbe1bf9b29..56da566009 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -305,21 +305,11 @@ ParallelExecutor::ParallelExecutor( if (build_strategy.enable_parallel_graph_) { #ifdef PADDLE_WITH_CUDA - auto parallel_graph = - details::SeparateMultiDevicesGraph(member_->places_, std::move(graph)); - auto seq_allreduce_pass = - ir::PassRegistry::Instance().Get("all_reduce_deps_pass"); - seq_allreduce_pass->Erase(details::kAllOpDescs); - seq_allreduce_pass->Set>( - details::kAllOpDescs, - new std::vector(main_program.Block(0).AllOps())); - for (size_t i = 0; i < parallel_graph.size(); ++i) { - parallel_graph[i] = - seq_allreduce_pass->Apply(std::move(parallel_graph[i])); - } + // TODO(Yancey1989): Remove passing in the main_program when + // allreduce_seq_pass doesn't need it as the attr. 
member_->executor_.reset(new details::ParallelSSAGraphExecutor( - exec_strategy, member_->local_scopes_, member_->places_, - std::move(parallel_graph))); + exec_strategy, member_->local_scopes_, member_->places_, main_program, + std::move(graph))); #else PADDLE_THROW( "Paddle should be compiled with CUDA for ParallelGraph Execution."); From 5677c9d4eed6b7d591e214b980354d18bb1c4c87 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 18 Feb 2019 14:45:39 +0800 Subject: [PATCH 047/117] update comment test=develop --- paddle/fluid/framework/details/parallel_ssa_graph_executor.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index 2cafa1873a..c36618016b 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -86,7 +86,8 @@ ParallelSSAGraphExecutor::ParallelSSAGraphExecutor( pool_(places.size() >= 2 ? new ::ThreadPool(places.size()) : nullptr), places_(std::move(places)), main_prog_(main_prog), - // TODO(Yancey1989): copy graphs is not safely since it deleted the attrs. + // TODO(Yancey1989): Copying graphs is not safely since it deleted the + // attrs. graphs_(SeparateMultiDevicesGraph(std::move(graph))) { PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size()); From 0f8bd73cc9d23ba1bf2fc9b15bae74450daee0d5 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Mon, 18 Feb 2019 14:51:47 +0800 Subject: [PATCH 048/117] cleanup code test=develop --- paddle/fluid/framework/details/build_strategy.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index 7d2a081e3b..45c2c73415 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -34,6 +34,8 @@ namespace details { static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) { // Should fix the allreduce op order if scheduling // them in multiple threads or processes to avoid hang. + // NOTE: ParallelExecutor would execute this pass on each graph, so + // don't need to append it here. return (!strategy.enable_sequential_execution_ && strategy.num_trainers_ > 1) && !strategy.enable_parallel_graph_; @@ -118,7 +120,7 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { } // Verify that the graph is correct for multi-device executor. 
- auto multi_devices_pass = AppendPass("multi_devices_check_pass"); + AppendPass("multi_devices_check_pass"); if (SeqOnlyAllReduceOps(strategy)) { AppendPass("all_reduce_deps_pass"); From 5e6834d891252723961efb4de4b89e189745fd12 Mon Sep 17 00:00:00 2001 From: Dun Date: Mon, 18 Feb 2019 15:21:55 +0800 Subject: [PATCH 049/117] inplace group_norm (#15754) * inplace group * test=develop --- paddle/fluid/operators/group_norm_op.cc | 39 +++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc index e18d9841bb..cbdffa0db8 100644 --- a/paddle/fluid/operators/group_norm_op.cc +++ b/paddle/fluid/operators/group_norm_op.cc @@ -170,13 +170,48 @@ class GroupNormGradMaker : public framework::SingleGradOpDescMaker { } }; +class GroupNormInplaceInToOut : public framework::InplaceInToOut { + public: + using InplaceInToOut::InplaceInToOut; + + protected: + std::unordered_map Apply( + const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { + return {{"X", "Y"}}; + } +}; + +class GroupNormGradInplaceInToOut : public framework::InplaceInToOut { + public: + using InplaceInToOut::InplaceInToOut; + + protected: + std::unordered_map Apply( + const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { + return {{framework::GradVarName("Y"), framework::GradVarName("X")}}; + } +}; + +class GroupNormOpInferVarType + : public framework::PassInDtypeAndVarTypeToOutput { + protected: + std::unordered_map GetInputOutputWithSameType() + const override { + return {{"X", /*->*/ "Y"}}; + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(group_norm, ops::GroupNormOp, ops::GroupNormOpMaker, - ops::GroupNormGradMaker); -REGISTER_OPERATOR(group_norm_grad, ops::GroupNormGradOp); + ops::GroupNormOpInferVarType, ops::GroupNormGradMaker, + ops::GroupNormInplaceInToOut); +REGISTER_OPERATOR(group_norm_grad, ops::GroupNormGradOp, + ops::GroupNormGradInplaceInToOut); REGISTER_OP_CPU_KERNEL( group_norm, ops::GroupNormKernel, ops::GroupNormKernel); From 6cb0208ab0c8ac7e2133788b09fca797ecd78020 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 18 Feb 2019 15:44:21 +0800 Subject: [PATCH 050/117] add reference. 
test=develop --- python/paddle/fluid/tests/unittests/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 534411219b..289a48aac9 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -77,6 +77,7 @@ list(REMOVE_ITEM TEST_OPS test_bilinear_interp_op) list(REMOVE_ITEM TEST_OPS test_nearest_interp_op) list(REMOVE_ITEM TEST_OPS test_imperative_resnet) list(REMOVE_ITEM TEST_OPS test_imperative_optimizer) +list(REMOVE_ITEM TEST_OPS test_ir_memory_optimize_transformer) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -107,6 +108,9 @@ py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SE py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL) set_tests_properties(test_parallel_executor_fetch_feed PROPERTIES TIMEOUT 450) py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL) +if(NOT WIN32) +py_test_modules(test_ir_memory_optimize_transformer MODULES test_ir_memory_optimize_transformer SERIAL) +endif() if(NOT APPLE) py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL) if(CMAKE_BUILD_TYPE STREQUAL "Debug") From 3ce12b1b8e9ae4bb43567e79b081b6cdc4e4ceeb Mon Sep 17 00:00:00 2001 From: chengduozh Date: Mon, 18 Feb 2019 16:42:16 +0800 Subject: [PATCH 051/117] fix shape api doc test=develop --- paddle/fluid/operators/shape_op.cc | 13 +++++++------ python/paddle/fluid/layers/nn.py | 8 +++++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc index 1be9fe47af..efc497fa47 100644 --- a/paddle/fluid/operators/shape_op.cc +++ b/paddle/fluid/operators/shape_op.cc @@ -35,14 +35,15 @@ class ShapeOp : public framework::OperatorWithKernel { class ShapeOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", "(Tensor), The input tensor."); - AddOutput("Out", - "(Tensor), The shape of input tensor, the data type of the shape" - " is int32_t, will be on the same device with the input Tensor."); + AddInput("Input", "(LoDTensor), The input tensor."); + AddOutput( + "Out", + "(LoDTensor), The shape of input tensor, the data type of the shape" + " is int32_t, will be on the same device with the input Tensor."); AddComment(R"DOC( -Shape Operator +Shape Operator. -Get the shape of input tensor. Only support CPU input Tensor now. +Return the shape of the input. )DOC"); } }; diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 46ce58fd2d..69885fd17a 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -8710,13 +8710,15 @@ def slice(input, axes, starts, ends): @templatedoc() def shape(input): """ - ${comment} + **Shape Layer** + + Return the shape of the input. Args: - input (Variable): ${input_comment} + input (Variable): The input variable. Returns: - out (Variable): ${out_comment} + out (Variable): The shape of the input variable. Examples: .. 
code-block:: python From 40402d5e6885b2f0e938a6a30c46869c53d63b6e Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 15 Feb 2019 12:39:56 +0000 Subject: [PATCH 052/117] add emb seqpool jitcode test=develop --- paddle/fluid/operators/jit/gen/CMakeLists.txt | 1 + paddle/fluid/operators/jit/gen/embseqpool.cc | 148 ++++++++++++++++++ paddle/fluid/operators/jit/gen/embseqpool.h | 81 ++++++++++ paddle/fluid/operators/jit/gen/seqpool.h | 2 +- 4 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/operators/jit/gen/embseqpool.cc create mode 100644 paddle/fluid/operators/jit/gen/embseqpool.h diff --git a/paddle/fluid/operators/jit/gen/CMakeLists.txt b/paddle/fluid/operators/jit/gen/CMakeLists.txt index efc7eb79d3..294f73d964 100644 --- a/paddle/fluid/operators/jit/gen/CMakeLists.txt +++ b/paddle/fluid/operators/jit/gen/CMakeLists.txt @@ -31,3 +31,4 @@ USE_JITKERNEL_GEN(kNCHW16CMulNC) USE_JITKERNEL_GEN(kSeqPool) USE_JITKERNEL_GEN(kHMax) USE_JITKERNEL_GEN(kHSum) +USE_JITKERNEL_GEN(kEmbSeqPool) diff --git a/paddle/fluid/operators/jit/gen/embseqpool.cc b/paddle/fluid/operators/jit/gen/embseqpool.cc new file mode 100644 index 0000000000..3f233acee9 --- /dev/null +++ b/paddle/fluid/operators/jit/gen/embseqpool.cc @@ -0,0 +1,148 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ + +#include "paddle/fluid/operators/jit/gen/embseqpool.h" +#include // offsetof +#include +#include "paddle/fluid/operators/jit/gen/act.h" // for exp_float_consts ones +#include "paddle/fluid/operators/jit/registry.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +void EmbSeqPoolJitCode::genCode() { + preCode(); + constexpr int block = YMM_FLOAT_BLOCK; + constexpr int max_num_regs = 8; + const int num_block = tbl_w_ / block; + const int num_groups = num_block / max_num_regs; + const size_t block_size = sizeof(float) * block; + std::vector groups(num_groups, max_num_regs); + int rest_num_regs = num_block % max_num_regs; + if (rest_num_regs > 0) { + groups.push_back(rest_num_regs); + } + + // protect param_dst + mov(reg_ptr_param_dst, param_dst); + mov(reg_idx_width_in_byte, + qword[param_attr + offsetof(emb_seq_pool_attr_t, index_width)]); + mov(reg_idx_height, + qword[param_attr + offsetof(emb_seq_pool_attr_t, index_height)]); + mov(rax, sizeof(int64_t)); + mul(reg_idx_width_in_byte); + mov(reg_idx_width_in_byte, rax); + const size_t tbl_width_in_byte = sizeof(float) * tbl_w_; + int acc_num_regs = 0; + for (int num_regs : groups) { + Label l_next_idx_w, l_next_idx_h, l_save_now; + xor_(reg_idx_w_i_in_byte, reg_idx_w_i_in_byte); + mov(reg_ptr_dst_i, reg_ptr_param_dst); + add(reg_ptr_dst_i, acc_num_regs * block_size); + add(param_tbl, acc_num_regs * block_size); + + L(l_next_idx_w); + { + // h == 0 + mov(reg_ptr_idx_i, param_idx); + add(reg_ptr_idx_i, reg_idx_w_i_in_byte); + mov(reg_idx, qword[reg_ptr_idx_i]); + mov(rax, tbl_width_in_byte); + mul(reg_idx); + mov(reg_ptr_tbl_i, rax); // reg is offset now + add(reg_ptr_tbl_i, param_tbl); // reg is ptr_i now + size_t w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ymm_t(reg_i + num_regs), ptr[reg_ptr_tbl_i + w_offset]); + w_offset += block_size; + } + add(reg_ptr_idx_i, reg_idx_width_in_byte); + + // end condition of idx h + mov(reg_idx_h_end, reg_idx_height); + mov(rax, reg_idx_width_in_byte); + mul(reg_idx_h_end); + mov(reg_idx_h_end, rax); + add(reg_idx_h_end, reg_idx_w_i_in_byte); + add(reg_idx_h_end, param_idx); + + cmp(reg_ptr_idx_i, reg_idx_h_end); + jge(l_save_now, T_NEAR); + L(l_next_idx_h); + { + mov(reg_idx, qword[reg_ptr_idx_i]); + mov(reg_ptr_tbl_i, reg_idx); + mov(rax, tbl_width_in_byte); + mul(reg_idx); + mov(reg_ptr_tbl_i, rax); + add(reg_ptr_tbl_i, param_tbl); + size_t w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ymm_t(reg_i), ptr[reg_ptr_tbl_i + w_offset]); + vaddps(ymm_t(reg_i + num_regs), ymm_t(reg_i + num_regs), + ymm_t(reg_i)); + w_offset += block_size; + } + add(reg_ptr_idx_i, reg_idx_width_in_byte); + cmp(reg_ptr_idx_i, reg_idx_h_end); + jl(l_next_idx_h, T_NEAR); + } // end of idx h + L(l_save_now); + // avg or sqrt here, if needed + w_offset = 0; + for (int reg_i = 0; reg_i < num_regs; ++reg_i) { + vmovups(ptr[reg_ptr_dst_i + w_offset], ymm_t(reg_i + num_regs)); + w_offset += block_size; + } + add(reg_ptr_dst_i, tbl_width_in_byte); + add(reg_idx_w_i_in_byte, sizeof(int64_t)); + cmp(reg_idx_w_i_in_byte, reg_idx_width_in_byte); + jl(l_next_idx_w, T_NEAR); + } // end of idx w + acc_num_regs += num_regs; + } // end of groups + postCode(); +} + +class EmbSeqPoolCreator : public JitCodeCreator { + public: + bool UseMe(const emb_seq_pool_attr_t& attr) const override { + return platform::MayIUse(platform::avx) && + attr.table_width % YMM_FLOAT_BLOCK == 0; + } + size_t CodeSize(const 
emb_seq_pool_attr_t& attr) const override { + return 96 + (attr.table_width / YMM_FLOAT_BLOCK) * 96 * 8; + } + std::unique_ptr CreateJitCode( + const emb_seq_pool_attr_t& attr) const override { + PADDLE_ENFORCE_GT(attr.table_height, 0); + PADDLE_ENFORCE_GT(attr.table_width, 0); + PADDLE_ENFORCE_GT(attr.index_height, 0); + PADDLE_ENFORCE_GT(attr.index_width, 0); + PADDLE_ENFORCE_GT(attr.out_width, 0); + return make_unique(attr, CodeSize(attr)); + } +}; + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle + +namespace gen = paddle::operators::jit::gen; + +REGISTER_JITKERNEL_GEN(kEmbSeqPool, gen::EmbSeqPoolCreator); diff --git a/paddle/fluid/operators/jit/gen/embseqpool.h b/paddle/fluid/operators/jit/gen/embseqpool.h new file mode 100644 index 0000000000..5afcfbdc17 --- /dev/null +++ b/paddle/fluid/operators/jit/gen/embseqpool.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#pragma once + +#include +#include "glog/logging.h" +#include "paddle/fluid/operators/jit/gen/jitcode.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +class EmbSeqPoolJitCode : public JitCode { + public: + explicit EmbSeqPoolJitCode(const emb_seq_pool_attr_t& attr, + size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), + tbl_w_(attr.table_width), + type_(attr.pool_type) { + if (type_ != SeqPoolType::kSum) { + LOG(FATAL) << "Only support sum pool yet "; + } + this->genCode(); + } + + std::string name() const override { + std::string base = "EmbSeqPoolJitCode"; + if (type_ == SeqPoolType::kSum) { + base += "_Sum"; + } else if (type_ == SeqPoolType::kAvg) { + base += "_Avg"; + } else if (type_ == SeqPoolType::kSqrt) { + base += "_Sqrt"; + } + base += ("_W" + std::to_string(tbl_w_)); + return base; + } + void genCode() override; + + private: + int tbl_w_; + SeqPoolType type_; + reg64_t param_tbl{abi_param1}; + reg64_t param_idx{abi_param2}; + reg64_t param_dst{abi_param3}; + reg64_t param_attr{abi_param4}; + + reg64_t reg_tmp{rax}; + + reg64_t reg_idx_width_in_byte{r8}; + reg64_t reg_idx_height{r9}; + + reg64_t reg_ptr_tbl_i{r10}; + reg64_t reg_idx{r10}; // could use same of reg_ptr_tbl_i + reg64_t reg_ptr_idx_i{r11}; + reg64_t reg_ptr_dst_i{r12}; + reg64_t reg_ptr_param_dst{r13}; // rdx is used in mul so protect param_dst + + reg64_t reg_idx_w_i_in_byte{r14}; + reg64_t reg_idx_h_end{r15}; +}; + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/jit/gen/seqpool.h b/paddle/fluid/operators/jit/gen/seqpool.h index 4108ee2f46..e909bc7c79 100644 --- a/paddle/fluid/operators/jit/gen/seqpool.h +++ b/paddle/fluid/operators/jit/gen/seqpool.h @@ -32,7 +32,7 @@ class SeqPoolJitCode : public JitCode { : JitCode(code_size, code_ptr), w_(attr.w), type_(attr.type) { if (!(type_ == SeqPoolType::kSum || type_ == 
SeqPoolType::kAvg || type_ == SeqPoolType::kSqrt)) { - LOG(FATAL) << "Only support sum pool yet "; + LOG(FATAL) << "Only supported pool type: sum, avg and sqrt."; } fp_h_[0] = 1.f; this->genCode(); From 75fc792d40990e6ac7755a56b5d5861f36066fb4 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 18 Feb 2019 09:33:18 +0000 Subject: [PATCH 053/117] fix when table width larger than 64 test=develop --- paddle/fluid/operators/jit/benchmark.cc | 2 +- paddle/fluid/operators/jit/gen/embseqpool.cc | 5 +++-- paddle/fluid/operators/jit/test.cc | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 9831b6ef92..96196d26a8 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -312,7 +312,7 @@ void BenchEmbSeqPoolKernel() { const T* table_data = table.data(); for (auto type : pool_types) { for (int idx_w : {1, 2, 10, 16}) { - for (int idx_h : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 9, 13, 16}) { int64_t out_w = tbl_w * idx_w; jit::emb_seq_pool_attr_t attr(tbl_h, tbl_w, idx_h, idx_w, out_w, type); diff --git a/paddle/fluid/operators/jit/gen/embseqpool.cc b/paddle/fluid/operators/jit/gen/embseqpool.cc index 3f233acee9..23837a3fb9 100644 --- a/paddle/fluid/operators/jit/gen/embseqpool.cc +++ b/paddle/fluid/operators/jit/gen/embseqpool.cc @@ -53,7 +53,6 @@ void EmbSeqPoolJitCode::genCode() { xor_(reg_idx_w_i_in_byte, reg_idx_w_i_in_byte); mov(reg_ptr_dst_i, reg_ptr_param_dst); add(reg_ptr_dst_i, acc_num_regs * block_size); - add(param_tbl, acc_num_regs * block_size); L(l_next_idx_w); { @@ -113,8 +112,10 @@ void EmbSeqPoolJitCode::genCode() { cmp(reg_idx_w_i_in_byte, reg_idx_width_in_byte); jl(l_next_idx_w, T_NEAR); } // end of idx w + acc_num_regs += num_regs; - } // end of groups + add(param_tbl, num_regs * block_size); // do not use acc_num_regs + } // end of groups postCode(); } diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index c35b6aef23..15e2993824 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -625,7 +625,7 @@ void TestEmbSeqPoolKernel() { const T* table_data = table.data(); for (auto type : pool_types) { for (int idx_w : {1, 2, 10, 16}) { - for (int idx_h : {1, 2, 10, 16}) { + for (int idx_h : {1, 2, 9, 13, 16}) { auto ref = jit::GetRefer>(); EXPECT_TRUE(ref != nullptr); std::vector idx(idx_h * idx_w); From 685a20ef5683100aa139177a566d2d3758a5def4 Mon Sep 17 00:00:00 2001 From: Yihua Xu Date: Mon, 18 Feb 2019 18:29:32 +0800 Subject: [PATCH 054/117] Add JIT CRF_decoding and Layer_norm unit-test (#15699) * Add the CRFDecoding and LayerNorm's test case test=develop * Fix the size checking issue test=develop * Remove the remnant code test=develop * Add TestAllImpls and double support test=develop * Clean Code test=develop * Add benchmark test for LayerNorm & CRFDecoding test=develop --- paddle/fluid/operators/jit/benchmark.cc | 75 +++++++++++++ paddle/fluid/operators/jit/test.cc | 133 +++++++++++++++++++++++- 2 files changed, 207 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 97ddf223ae..77a2d04ebf 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -339,6 +339,71 @@ void BenchSoftmaxKernel() { } } +template +void BenchLayerNormKernel() { + const T epsilon = 9.99999975e-06; + for (int n : {1, 2, 10}) { + for (int x_dim_0 : {1, 9, 17, 50}) 
{ + int left = n * x_dim_0; + for (int x_dim_1 : TestSizes()) { + int right = x_dim_1; + int sz = left * right; + Tensor x, mean, var, scale, bias, out; + x.Resize({n, x_dim_0, x_dim_1}); + out.Resize({n, x_dim_0, x_dim_1}); + mean.Resize({n, x_dim_0}); + var.Resize({n, x_dim_0}); + scale.Resize({x_dim_1}); + bias.Resize({x_dim_1}); + + RandomVec(sz, x.mutable_data(PlaceType()), -2.f, 2.f); + RandomVec(left, mean.mutable_data(PlaceType()), -2.f, 2.f); + RandomVec(left, var.mutable_data(PlaceType()), -2.f, 2.f); + RandomVec(right, scale.mutable_data(PlaceType()), -2.f, 2.f); + RandomVec(right, bias.mutable_data(PlaceType()), -2.f, 2.f); + + const T* scale_data = scale.data(); + const T* bias_data = bias.data(); + T* x_data = x.data(); + T* mean_data = mean.data(); + T* var_data = var.data(); + T* out_data = out.mutable_data(PlaceType()); + + BenchAllImpls, PlaceType>( + right, x_data, out_data, mean_data, var_data, scale_data, bias_data, + left, epsilon, right); + } + } + } +} + +template +void BenchCRFDecodingKernel() { + constexpr int state_trans_base_idx = 2; + for (int seq_len : {1, 11, 17, 50}) { + for (int tag_num : TestSizes()) { + int x_sz = seq_len * tag_num; + int w_sz = (tag_num + state_trans_base_idx) * tag_num; + Tensor x, w, alpha, track; + x.Resize({seq_len, tag_num}); + w.Resize({tag_num + state_trans_base_idx, tag_num}); + alpha.Resize({seq_len, tag_num}); + track.Resize({seq_len, tag_num}); + + RandomVec(x_sz, x.mutable_data(PlaceType()), -2.f, 2.f); + RandomVec(w_sz, w.mutable_data(PlaceType()), -2.f, 2.f); + + const T* x_data = x.data(); + const T* w_data = w.data(); + T* alpha_data = alpha.mutable_data(PlaceType()); + int* track_data = track.mutable_data(PlaceType()); + + BenchAllImpls, PlaceType>( + tag_num, seq_len, x_data, w_data, alpha_data, track_data, tag_num); + } + } +} + using T = float; using CPUPlace = paddle::platform::CPUPlace; @@ -382,6 +447,16 @@ BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } // softmax BENCH_FP32_CPU(kSoftmax) { BenchSoftmaxKernel(); } +// layernorm +BENCH_FP32_CPU(kLayerNorm) { + BenchLayerNormKernel(); +} + +// crfdecoding +BENCH_FP32_CPU(kCRFDecoding) { + BenchCRFDecodingKernel(); +} + // Benchmark all jit kernels including jitcode, mkl and refer. // To use this tool, run command: ./benchmark [options...] 
// Options: diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 237e588d35..85b50b79d9 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -292,6 +292,63 @@ struct TestFuncWithRefer, std::vector, std::vector, } }; +template +struct TestFuncWithRefer, std::vector, + std::vector, std::vector, std::vector, + std::vector, std::vector, int, float, int> { + void operator()(const typename jit::LayerNormTuples::func_type tgt, + std::vector& x, std::vector& outref, // NOLINT + std::vector& mean, std::vector& var, // NOLINT + const std::vector& scale, const std::vector& bias, + int left, const float epsilon, int right) { + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(x.size(), static_cast(left * right)); + EXPECT_EQ(outref.size(), static_cast(left * right)); + EXPECT_EQ(mean.size(), static_cast(left)); + EXPECT_EQ(var.size(), static_cast(left)); + EXPECT_EQ(scale.size(), static_cast(right)); + EXPECT_EQ(bias.size(), static_cast(right)); + std::vector outtgt(outref.size()); + const T* scale_data = scale.data(); + const T* bias_data = bias.data(); + T* x_data = x.data(); + T* mean_data = mean.data(); + T* var_data = var.data(); + T* outref_data = outref.data(); + T* outtgt_data = outtgt.data(); + + tgt(x_data, outtgt_data, mean_data, var_data, scale_data, bias_data, left, + epsilon, right); + ExpectEQ(outtgt_data, outref_data, left * right); + } +}; + +template +struct TestFuncWithRefer, int, std::vector, + std::vector, std::vector, std::vector, + int> { + void operator()(const typename jit::CRFDecodingTuples::func_type tgt, + const int seq_len, const std::vector& x, + const std::vector& w, std::vector& alpharef, // NOLINT + std::vector& trackref, int tag_num) { // NOLINT + constexpr int state_trans_base_idx = 2; + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(x.size(), static_cast(seq_len * tag_num)); + EXPECT_EQ(w.size(), + static_cast((tag_num + state_trans_base_idx) * tag_num)); + EXPECT_EQ(alpharef.size(), static_cast(seq_len * tag_num)); + EXPECT_EQ(trackref.size(), static_cast(seq_len * tag_num)); + std::vector alphatgt(alpharef.size()); + std::vector tracktgt(trackref.size()); + + memcpy(trackref.data(), tracktgt.data(), tag_num * sizeof(int)); + tgt(seq_len, (const T*)x.data(), (const T*)w.data(), alphatgt.data(), + tracktgt.data(), tag_num); + ExpectEQ(alpharef.data(), alphatgt.data(), seq_len * tag_num); + ExpectEQ(trackref.data(), tracktgt.data(), seq_len * tag_num); + } +}; + template void TestAllImpls(const typename KernelTuples::attr_type& attr, Args... 
args) { @@ -640,6 +697,71 @@ void TestNCHW16CMulNCKernel() { } } +template +void TestLayerNormKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + const T epsilon = 9.99999975e-06; + for (int n : {1, 2, 10}) { + for (int x_dim_0 : {1, 9, 17, 50}) { + int left = n * x_dim_0; + for (int x_dim_1 : TestSizes()) { + int right = x_dim_1; + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + int sz = left * right; + std::vector x(sz), mean(left), var(left), scale(right), bias(right), + outref(sz); + RandomVec(sz, x.data(), -2.f, 2.f); + RandomVec(left, mean.data(), -2.f, 2.f); + RandomVec(left, var.data(), -2.f, 2.f); + RandomVec(right, scale.data(), -2.f, 2.f); + RandomVec(right, bias.data(), -2.f, 2.f); + + const T* scale_data = scale.data(); + const T* bias_data = bias.data(); + T* x_data = x.data(); + T* mean_data = mean.data(); + T* var_data = var.data(); + T* outref_data = outref.data(); + + ref(x_data, outref_data, mean_data, var_data, scale_data, bias_data, + left, epsilon, right); + + TestAllImpls, PlaceType, std::vector, + std::vector, std::vector, std::vector, + std::vector, std::vector, int, float>( + right, x, outref, mean, var, scale, bias, left, epsilon, right); + } + } + } +} + +template +void TestCRFDecodingKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + constexpr int state_trans_base_idx = 2; + for (int seq_len : {1, 11, 17, 50}) { + for (int tag_num : TestSizes()) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + int x_sz = seq_len * tag_num; + int w_sz = (tag_num + state_trans_base_idx) * tag_num; + std::vector x(x_sz), w(w_sz), alpharef(x_sz); + std::vector trackref(x_sz); + RandomVec(x_sz, x.data(), -2.f, 2.f); + RandomVec(w_sz, w.data(), -2.f, 2.f); + + ref(seq_len, (const T*)x.data(), (const T*)w.data(), alpharef.data(), + trackref.data(), tag_num); + + TestAllImpls, PlaceType, int, + std::vector, std::vector, std::vector, + std::vector, int>(tag_num, seq_len, x, w, alpharef, + trackref, tag_num); + } + } +} + // XYZNTuple TEST(JITKernel, kVMul) { TestXYZNKernel(); @@ -761,7 +883,16 @@ TEST(JITKernel, kNCHW16CMulNC) { TestNCHW16CMulNCKernel(); } -// TODO(yihua/TJ): add crf decoding and layer norm unit tests +TEST(JITKernel, kLayerNorm) { + TestLayerNormKernel(); + TestLayerNormKernel(); +} + +TEST(JITKernel, kCRFDecoding) { + TestCRFDecodingKernel(); + TestCRFDecodingKernel(); +} TEST(JITKernel, pool) { // TODO(TJ): add some test From 700495e11f3a7567fed5552fc7a6d8d833b3d3e1 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 18 Feb 2019 18:47:26 +0800 Subject: [PATCH 055/117] Fix FtrlOptimizer's API comment test=develop --- python/paddle/fluid/optimizer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index fbd04f1eb4..fe2b3fbbd9 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -1368,9 +1368,9 @@ class FtrlOptimizer(Optimizer): Args: learning_rate (float|Variable): global learning rate. - l1 (float): - l2 (float): - lr_power (float): + l1 (float): L1 regularization strength. + l2 (float): L2 regularization strength. + lr_power (float): Learning Rate Power. regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer. name: A optional name prefix. 
From 78d6bb3a7a5c191722593f23cf195bda6d62634b Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 18 Feb 2019 11:06:13 +0000 Subject: [PATCH 056/117] test=develop, fix patch ELF install failed --- tools/manylinux1/build_scripts/build.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index 6c551eceb4..3b78af00fd 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -107,11 +107,11 @@ curl-config --features rm -rf /usr/local/ssl # Install patchelf (latest with unreleased bug fixes) -curl -sLO http://nipy.bic.berkeley.edu/manylinux/patchelf-0.9njs2.tar.gz -check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH -tar -xzf patchelf-0.9njs2.tar.gz -(cd patchelf-0.9njs2 && ./configure && make && make install) -rm -rf patchelf-0.9njs2.tar.gz patchelf-0.9njs2 +curl -sLO https://nixos.org/releases/patchelf/patchelf-0.9/patchelf-0.9.tar.gz +check_sha256sum patchelf-0.9.tar.gz $PATCHELF_HASH +tar -xzf patchelf-0.9.tar.gz +(cd patchelf-0.9 && ./configure && make && make install) +rm -rf patchelf-0.9.tar.gz patchelf-0.9 # Install latest pypi release of auditwheel LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" $PY35_BIN/pip install auditwheel From 96b861a83690fa306f0a76df5abb91297e7502f3 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Tue, 19 Feb 2019 02:45:30 +0000 Subject: [PATCH 057/117] test=develop, change md5 for patchELF --- tools/manylinux1/build_scripts/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index 3b78af00fd..5b676c0243 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -17,7 +17,7 @@ OPENSSL_ROOT=openssl-1.1.0i OPENSSL_HASH=ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99 EPEL_RPM_HASH=e5ed9ecf22d0c4279e92075a64c757ad2b38049bcf5c16c4f2b75d5f6860dc0d DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc -PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb +PATCHELF_HASH=f2aa40a6148cb3b0ca807a1bf836b081793e55ec9e5540a5356d800132be7e0a CURL_ROOT=curl-7.49.1 CURL_HASH=eb63cec4bef692eab9db459033f409533e6d10e20942f4b060b32819e81885f1 AUTOCONF_ROOT=autoconf-2.69 From 72061b0ac0a135e40eb811278e9ad9b8cac48168 Mon Sep 17 00:00:00 2001 From: baojun <32073718+baojun-nervana@users.noreply.github.com> Date: Mon, 18 Feb 2019 18:56:45 -0800 Subject: [PATCH 058/117] Add ngraph op coverage (#15721) --- .../operators/ngraph/ops/fill_constant_op.h | 2 - .../ngraph/test_accuracy_ngraph_op.py | 34 +---------- .../ngraph/test_batch_norm_ngraph_op.py | 16 ------ .../unittests/ngraph/test_conv2d_ngraph_op.py | 55 ------------------ .../ngraph/test_elementwise_add_ngraph_op.py | 13 +---- .../ngraph/test_fill_constant_ngraph_op.py | 24 +++++--- .../unittests/ngraph/test_mean_ngraph_op.py | 7 --- .../unittests/ngraph/test_mul_ngraph_op.py | 34 +---------- .../unittests/ngraph/test_pool2d_ngraph_op.py | 56 ++++--------------- .../unittests/ngraph/test_scale_ngraph_op.py | 19 ------- .../ngraph/test_softmax_ngraph_op.py | 6 -- .../unittests/ngraph/test_top_k_ngraph_op.py | 25 --------- .../paddle/fluid/tests/unittests/op_test.py | 4 ++ 13 files changed, 35 insertions(+), 260 deletions(-) diff --git a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h 
index 406a4314f8..58783bc220 100644 --- a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h +++ b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h @@ -46,8 +46,6 @@ void BuildFillConstantNode( ng_dtype = ngraph::element::i64; } else if (data_type == paddle::framework::proto::VarType::INT32) { ng_dtype = ngraph::element::i32; - } else if (data_type == paddle::framework::proto::VarType::BOOL) { - ng_dtype = ngraph::element::boolean; } else { PADDLE_THROW("unsupported data type: %s", data_type); } diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py index 84b9198dbf..5298c3c2f6 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_accuracy_ngraph_op.py @@ -15,39 +15,7 @@ from __future__ import print_function import unittest -import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest - - -class TestNGRAPHAccuracyOp(OpTest): - def setUp(self): - self.op_type = "accuracy" - self.dtype = np.float32 - self.init_dtype() - n = 128 - infer = np.random.random((n, 1)).astype(self.dtype) - indices = np.random.randint(0, 2, (n, 1)) - label = np.random.randint(0, 2, (n, 1)) - self.inputs = {'Out': infer, 'Indices': indices, "Label": label} - num_correct = 0 - for rowid in range(n): - for ele in indices[rowid]: - if ele == label[rowid]: - num_correct += 1 - break - self.outputs = { - 'Accuracy': np.array([num_correct / float(n)]).astype(self.dtype), - 'Correct': np.array([num_correct]).astype("int64"), - 'Total': np.array([n]).astype("int64") - } - self._cpu_only = True - - def init_dtype(self): - pass - - def test_check_output(self): - self.check_output() - +from paddle.fluid.tests.unittests.test_accuracy_op import TestAccuracyOp if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py index 511173af5e..34fb73f3cf 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_batch_norm_ngraph_op.py @@ -17,21 +17,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_batch_norm_op import TestBatchNormOpTraining, TestBatchNormOpInference - -class TestNGRAPHBatchNormOpTraining(TestBatchNormOpTraining): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpTraining, self).init_kernel_type() - - -class TestNGRAPHBatchNormOpInference(TestBatchNormOpInference): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpInference, self).init_kernel_type() - - -class TestNGRAPHBatchNormOpWithReluInference(TestBatchNormOpInference): - def init_kernel_type(self): - super(TestNGRAPHBatchNormOpWithReluInference, self).init_kernel_type() - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py index dbc8557b4e..ff2e865b66 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_conv2d_ngraph_op.py @@ -17,60 +17,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride, TestWithGroup, TestWith1x1, TestWithInput1x1Filter1x1 - -class 
TestNGRAPH(TestConv2dOp): - def setUp(self): - super(TestNGRAPH, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPH, self).init_kernel_type() - - -class TestNGRAPHWithPad(TestWithPad): - def setUp(self): - super(TestNGRAPHWithPad, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithPad, self).init_kernel_type() - - -class TestNGRAPHWithStride(TestWithStride): - def setUp(self): - super(TestNGRAPHWithStride, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithStride, self).init_kernel_type() - - -class TestNGRAPHWithGroup(TestWithGroup): - def setUp(self): - super(TestNGRAPHWithGroup, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithGroup, self).init_kernel_type() - - -class TestNGRAPHWith1x1(TestWith1x1): - def setUp(self): - super(TestNGRAPHWith1x1, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWith1x1, self).init_kernel_type() - - -class TestNGRAPHWithInput1x1Filter1x1(TestWithInput1x1Filter1x1): - def setUp(self): - super(TestNGRAPHWithInput1x1Filter1x1, self).setUp() - self._cpu_only = True - - def init_kernel_type(self): - super(TestNGRAPHWithInput1x1Filter1x1, self).init_kernel_type() - - if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py index 67f749bfee..3fb9af3a54 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_elementwise_add_ngraph_op.py @@ -13,18 +13,9 @@ # limitations under the License. from __future__ import print_function -import unittest -from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp - - -class TestNGRAPHElementwiseAddOp(TestElementwiseAddOp): - def setUp(self): - super(TestNGRAPHElementwiseAddOp, self).setUp() - self._cpu_only = True - - def init_input_output(self): - super(TestNGRAPHElementwiseAddOp, self).init_input_output() +import unittest +from paddle.fluid.tests.unittests.test_elementwise_add_op import TestElementwiseAddOp, TestElementwiseAddOp_broadcast_0 if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py index 835376ffe7..2b10b8f7a3 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_fill_constant_ngraph_op.py @@ -13,24 +13,34 @@ # limitations under the License. 
from __future__ import print_function + import unittest +import numpy as np from paddle.fluid.tests.unittests.test_fill_constant_op import TestFillConstantOp1, TestFillConstantOp2, TestFillConstantOpWithSelectedRows -class TestNGRAPHFillConstantOp1(TestFillConstantOp1): +class TestNGRAPHFillConstantFP64(TestFillConstantOp1): def setUp(self): - super(TestNGRAPHFillConstantOp1, self).setUp() + super(TestNGRAPHFillConstantFP64, self).setUp() + + self.attrs = {'shape': [123, 92], 'value': 3.8, 'dtype': 6} + self.outputs = {'Out': np.full((123, 92), 3.8)} -class TestNGRAPHFillConstantOp2(TestFillConstantOp2): +class TestNGRAPHFillConstantINT32(TestFillConstantOp2): def setUp(self): - super(TestNGRAPHFillConstantOp2, self).setUp() + super(TestNGRAPHFillConstantINT32, self).setUp() + self.attrs = {'shape': [123, 92], 'dtype': 2} + self.outputs = {'Out': np.full((123, 92), 0)} -class TestNGRAPHFillConstantOpWithSelectedRows( - TestFillConstantOpWithSelectedRows): + +class TestNGRAPHFillConstantINT64(TestFillConstantOp2): def setUp(self): - super(TestFillConstantOpWithSelectedRows, self).setUp() + super(TestNGRAPHFillConstantINT64, self).setUp() + + self.attrs = {'shape': [123, 92], 'dtype': 3} + self.outputs = {'Out': np.full((123, 92), 0)} if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py index 11881ac6e5..b4894734cb 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_mean_ngraph_op.py @@ -16,12 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_mean_op import TestMeanOp - -class TestNGRAPHMeanOp(TestMeanOp): - def setUp(self): - super(TestNGRAPHMeanOp, self).setUp() - self._cpu_only = True - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py index a916c8d450..549d03f6e9 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py @@ -15,39 +15,7 @@ from __future__ import print_function import unittest -import numpy as np -from paddle.fluid.tests.unittests.op_test import OpTest - - -class TestNGRAPHMulOp(OpTest): - def setUp(self): - self.op_type = "mul" - self.dtype = np.float32 - self.init_dtype_type() - self.inputs = { - 'X': np.random.random((2, 4)).astype(self.dtype), - 'Y': np.random.random((4, 4)).astype(self.dtype) - } - self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} - self._cpu_only = True - - def init_dtype_type(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.5) - - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")) - - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')) - +from paddle.fluid.tests.unittests.test_mul_op import TestMulOp, TestMulOp2 if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py index 96a2b72d8a..ff82e9fa1d 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py +++ 
b/python/paddle/fluid/tests/unittests/ngraph/test_pool2d_ngraph_op.py @@ -14,61 +14,25 @@ from __future__ import print_function -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 - - -class TestNGRAPHPool2D_Op(TestPool2D_Op): - def setUp(self): - super(TestNGRAPHPool2D_Op, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHPool2D_Op, self).init_test_case() - - -class TestNGRAPHCase1(TestCase1): - def setUp(self): - super(TestNGRAPHCase1, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHCase1, self).init_test_case() +import unittest - -class TestNGRAPHCase2(TestCase2): - def setUp(self): - super(TestNGRAPHCase2, self).setUp() - self._cpu_only = True - - def init_test_case(self): - super(TestNGRAPHCase2, self).init_test_case() - - -class TestNGRAPHCase3(TestCase3): - def setUp(self): - super(TestNGRAPHCase3, self).setUp() - self._cpu_only = True - - def init_pool_type(self): - super(TestNGRAPHCase3, self).init_pool_type() +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5 -class TestNGRAPHCase4(TestCase4): +class TestNGRAPHCeilMode(TestCase1): def setUp(self): - super(TestNGRAPHCase4, self).setUp() - self._cpu_only = True + super(TestNGRAPHCeilMode, self).setUp() - def init_pool_type(self): - super(TestNGRAPHCase4, self).init_pool_type() + def init_ceil_mode(self): + self.ceil_mode = True -class TestNGRAPHCase5(TestCase5): +class TestNGRAPHAdaptive(TestCase1): def setUp(self): - super(TestNGRAPHCase5, self).setUp() - self._cpu_only = True + super(TestNGRAPHAdaptive, self).setUp() - def init_pool_type(self): - super(TestNGRAPHCase5, self).init_pool_type() + def init_adaptive(self): + self.adaptive = True if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py index 4da5ca4583..8beb44f55e 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_scale_ngraph_op.py @@ -15,24 +15,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_scale_op import TestScaleOp, TestScaleOpSelectedRows - -class TestNGRAPHScaleOp(TestScaleOp): - def setUp(self): - super(TestNGRAPHScaleOp, self).setUp() - self._cpu_only = True - - def init_dtype_type(self): - pass - - -class TestNGRAPHScaleOpSelectedRows(TestScaleOpSelectedRows): - def setUp(self): - super(TestNGRAPHScaleOpSelectedRows, self).setUp() - self._cpu_only = True - - def init_dtype_type(self): - pass - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py index 81894c6e38..0cb08842df 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py @@ -16,11 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp - -class TestSoftmaxNGRAPHOp(TestSoftmaxOp): - def setUp(self): - super(TestSoftmaxNGRAPHOp, self).setUp() - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py 
b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py index fa68df1adf..d2319c4d92 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_top_k_ngraph_op.py @@ -16,30 +16,5 @@ from __future__ import print_function import unittest from paddle.fluid.tests.unittests.test_top_k_op import TestTopkOp, TestTopkOp3d, TestTopkOp2, TestTopkOp3, TestTopkOp4 - -class TestNGRAPHTopkOp(TestTopkOp): - def setUp(self): - super(TestNGRAPHTopkOp, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp2(TestTopkOp2): - def setUp(self): - super(TestNGRAPHTopkOp2, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp3(TestTopkOp3): - def setUp(self): - super(TestNGRAPHTopkOp3, self).setUp() - self._cpu_only = True - - -class TestNGRAPHTopkOp4(TestTopkOp4): - def setUp(self): - super(TestNGRAPHTopkOp4, self).setUp() - self._cpu_only = True - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 0fe836683b..8234457243 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -14,6 +14,7 @@ from __future__ import print_function +import os import unittest import numpy as np import random @@ -374,6 +375,9 @@ class OpTest(unittest.TestCase): return [] places = [fluid.CPUPlace()] cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False + use_ngraph = bool(os.getenv("FLAGS_use_ngraph", False)) + if use_ngraph: + cpu_only = True if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\ and not cpu_only: places.append(core.CUDAPlace(0)) From 796e221efc896beb6670088c14f47120d7798c4a Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 07:52:15 +0000 Subject: [PATCH 059/117] fix api arg0 test=release/1.3 --- paddle/fluid/API.spec | 6 +- paddle/fluid/pybind/pybind.cc | 109 +++++++++++++++++++++++++++++----- 2 files changed, 96 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index df961be911..8a3c062dba 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -473,11 +473,11 @@ paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_ paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]] paddle.fluid.LoDTensor.recursive_sequence_lengths recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> List[List[int]] paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CPUPlace) -> None 9. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPlace) -> None 10. 
set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPlace) -> None 11. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPlace) -> None 12. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPlace) -> None 13. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPlace) -> None 14. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPlace) -> None 15. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPlace) -> None 16. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPlace) -> None 17. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPinnedPlace) -> None 18. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPinnedPlace) -> None 19. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPinnedPlace) -> None 20. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPinnedPlace) -> None 21. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPinnedPlace) -> None 22. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPinnedPlace) -> None 23. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPinnedPlace) -> None 24. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int8], arg1: paddle::platform::CUDAPinnedPlace) -> None -paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None -paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None +paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, lod: List[List[int]]) -> None +paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, recursive_sequence_lengths: List[List[int]]) -> None paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core.Tensor) -> List[int] paddle.fluid.LoDTensorArray.__init__ __init__(self: paddle.fluid.core.LoDTensorArray) -> None -paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, arg0: paddle.fluid.core.LoDTensor) -> None +paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, tensor: paddle.fluid.core.LoDTensor) -> None paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> None paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a4a01ad647..a3a3872087 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -373,7 +373,13 @@ PYBIND11_MODULE(core, m) { PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()), "the provided lod info is invalid"); self.set_lod(new_lod); - }) + }, + py::arg("lod"), R"DOC( + Set LoD of the LoDTensor. + + Args: + lod (List[List[int]]): the lod to be set. 
+ )DOC") .def("set_recursive_sequence_lengths", [](LoDTensor &self, const std::vector> &recursive_sequence_lengths) { @@ -389,7 +395,17 @@ PYBIND11_MODULE(core, m) { CheckLoD(new_offset_lod, vectorize(self.dims()).front()), "the provided recursive_sequence_lengths info is invalid"); self.set_lod(new_offset_lod); - }) + }, + py::arg("recursive_sequence_lengths"), R"DOC( + Set LoD of the LoDTensor according to recursive sequence length. + + For example, if recursive_sequence_lengths=[2, 3], meaning that + there are two sequences with length 2 and 3 respectively, the + corresponding lod would be [0, 2, 2+3], i.e, [0, 2, 5]. + + Args: + recursive_sequence_lengths (List[List[int]]): sequence lengths. + )DOC") .def("lod", [](LoDTensor &self) -> std::vector> { // output the offset-based lod info @@ -398,7 +414,13 @@ PYBIND11_MODULE(core, m) { new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); return new_lod; - }) + }, + R"DOC( + Return the LoD of the LoDTensor. + + Returns: + out (List[List[int]]): the lod of the LoDTensor. + )DOC") // Set above comments of set_lod. .def("recursive_sequence_lengths", [](LoDTensor &self) -> std::vector> { @@ -408,12 +430,25 @@ PYBIND11_MODULE(core, m) { new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); return new_lod; - }) - .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool { - // Check that the lod info is valid and match the outermost - // dimension of the LoDTensor data - return CheckLoD(self.lod(), vectorize(self.dims()).front()); - }); + }, + R"DOC( + Return the sequence length of the LoDTensor corresponding to LoD. + + Returns: + out (List[List[int]): the sequence lengths. + )DOC") + .def("has_valid_recursive_sequence_lengths", + [](LoDTensor &self) -> bool { + // Check that the lod info is valid and match the outermost + // dimension of the LoDTensor data + return CheckLoD(self.lod(), vectorize(self.dims()).front()); + }, + R"DOC( + Check whether the lod of the LoDTensor is valid. + + Returns: + out (bool): whether the lod is valid. + )DOC"); py::class_(m, "SelectedRows") .def("__init__", @@ -549,11 +584,45 @@ All parameter, weight, gradient are variables in Paddle. [](Scope &self, const std::string &name) -> Variable * { return self.Var(name); }, + py::arg("name"), + R"DOC( + Find or create variable named :code:`name` in the current scope. + + If the variable named :code:`name` does not exist in the + current scope, the variable would be created. Otherwise, + return the existing variable. + + Args: + name (str): the variable name. + + Returns: + out (core.Variable): the found or created variable. + )DOC", + py::return_value_policy::reference) + .def("find_var", &Scope::FindVar, py::arg("name"), + R"DOC( + Find variable named :code:`name` in the current scope or + its parent scope. Return None if not found. + + Args: + name (str): the variable name. + + Returns: + out (core.Variable|None): the found variable or None. + )DOC", py::return_value_policy::reference) - .def("find_var", &Scope::FindVar, py::return_value_policy::reference) .def("new_scope", [](Scope &self) -> Scope * { return &self.NewScope(); }, + R"DOC( + Create a new sub-scope of the current scope. + + Returns: + out (core._Scope): the created sub-scope. + )DOC", py::return_value_policy::reference) - .def("drop_kids", &Scope::DropKids); + .def("drop_kids", &Scope::DropKids, + R"DOC( + Delete all sub-scopes of the current scope. 
+ )DOC"); m.def("Scope", []() -> Scope * { @@ -561,6 +630,12 @@ All parameter, weight, gradient are variables in Paddle. ScopePool::Instance().Insert(std::unique_ptr(s)); return s; }, + R"DOC( + Create a new scope. + + Returns: + out (core._Scope): the created scope. + )DOC", py::return_value_policy::reference); //! @note: Be careful! PyBind will return std::string as an unicode, not @@ -789,11 +864,13 @@ All parameter, weight, gradient are variables in Paddle. self[i].ShareDataWith(t); self[i].set_lod(t.lod()); }) - .def("append", [](LoDTensorArray &self, const LoDTensor &t) { - self.emplace_back(); - self.back().ShareDataWith(t); - self.back().set_lod(t.lod()); - }); + .def("append", + [](LoDTensorArray &self, const LoDTensor &t) { + self.emplace_back(); + self.back().ShareDataWith(t); + self.back().set_lod(t.lod()); + }, + py::arg("tensor"), "Append a LoDensor to LoDTensorArray."); m.def("IsInplace", [](std::string op) -> bool { return operators::IsInplace(op); }); From e6ff5498494134c0e5351450da7005c6da31ab5d Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 07:56:45 +0000 Subject: [PATCH 060/117] small fix doc test=release/1.3 --- paddle/fluid/pybind/pybind.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a3a3872087..c50c38160e 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -399,9 +399,9 @@ PYBIND11_MODULE(core, m) { py::arg("recursive_sequence_lengths"), R"DOC( Set LoD of the LoDTensor according to recursive sequence length. - For example, if recursive_sequence_lengths=[2, 3], meaning that + For example, if recursive_sequence_lengths=[[2, 3]], meaning that there are two sequences with length 2 and 3 respectively, the - corresponding lod would be [0, 2, 2+3], i.e, [0, 2, 5]. + corresponding lod would be [[0, 2, 2+3]], i.e, [[0, 2, 5]]. Args: recursive_sequence_lengths (List[List[int]]): sequence lengths. From 3d0610b59bed21a79c1c93bf8083e8a083f17848 Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 18 Feb 2019 08:03:59 +0000 Subject: [PATCH 061/117] fix data doc test=develop --- python/paddle/fluid/layers/io.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index b88be66906..a9b391fd53 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -56,7 +56,10 @@ def data(name, Args: name(str): The name/alias of the function - shape(list): Tuple declaring the shape. + shape(list): Tuple declaring the shape. If :code:`append_batch_size` is + True and there is no -1 inside :code:`shape`, it should be + considered as the shape of the each sample. Otherwise, it + should be considered as the shape of the batched data. append_batch_size(bool): 1. If true, it prepends -1 to the shape. For example if shape=[1], the resulting shape is [-1, 1]. From 56a5039e24ba581602185841fff970d89ab6e177 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Tue, 19 Feb 2019 11:20:21 +0800 Subject: [PATCH 062/117] Correct the doc in Python API (#15725) * Correct the comment in control_flow.py. * Correct the argument list of ops. test=develop * Update API.spec. test=develop * Skip op_callstack attr for all op apis. test=develop * Remove use_mkldnn and is_test from python api. test=develop * Remove use_mkldnn and is_test from op_proto_maker and hard-coding them in python when generating doc string. 
test=develop --- paddle/fluid/API.spec | 2 +- .../fluid/operators/controlflow/compare_op.cc | 10 +++++----- python/paddle/fluid/framework.py | 3 ++- python/paddle/fluid/layers/control_flow.py | 20 ++++++++----------- .../fluid/layers/layer_function_generator.py | 8 ++++++-- python/paddle/fluid/layers/ops.py | 4 ++-- 6 files changed, 24 insertions(+), 23 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index df961be911..a9fc840e8e 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -261,7 +261,7 @@ paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=N paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None)) -paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords='ignored', defaults=(None,)) +paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None) paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,)) diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc index 688457d4a7..5d3f9b43f8 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -51,6 +51,11 @@ class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { comment.type)); AddInput("Y", string::Sprintf("the right hand operand of %s operator", comment.type)); + AddAttr( + "axis", + "The start dimension index for broadcasting Y onto X. [default -1]") + .SetDefault(-1) + .EqualGreaterThan(-1); AddAttr("force_cpu", "Force fill output variable to cpu " "memory. Otherwise, fill output variable to the running " @@ -64,11 +69,6 @@ N-dim tensor. X and Y could be any type. The each element of the Out tensor is calculated by $%s$ )DOC", comment.equation)); - AddAttr( - "axis", - "The start dimension index for broadcasting Y onto X. [default -1]") - .SetDefault(-1) - .EqualGreaterThan(-1); } }; diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index ef304b1110..15367c724e 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -557,7 +557,8 @@ class OpProtoHolder(object): return { core.op_proto_and_checker_maker.kOpRoleAttrName(), core.op_proto_and_checker_maker.kOpRoleVarAttrName(), - core.op_proto_and_checker_maker.kOpNameScopeAttrName() + core.op_proto_and_checker_maker.kOpNameScopeAttrName(), + core.op_proto_and_checker_maker.kOpCreationCallstackAttrName() } diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 3a6753b01f..539c9675b2 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -506,9 +506,9 @@ class While(object): while loop control flow. Args: - cond (Variable): condition used to compare. + cond(Variable): condition used to compare. is_test(bool): A flag indicating whether execution is in test phase. - name (str): The name of this layer. 
+ name(str): The name of this layer. Examples: .. code-block:: python @@ -589,7 +589,8 @@ class While(object): def lod_rank_table(x, level=0): - """LoD Rank Table Operator. Given an input variable **x** and a level number + """ + LoD Rank Table Operator. Given an input variable **x** and a level number of LoD, this layer creates a LodRankTable object. A LoDRankTable object contains a list of bi-element tuples. Each tuple consists of an index and a length, both of which are int type. Refering to specified level of LoD, @@ -883,10 +884,8 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored): return cond -def equal(x, y, cond=None, **ignored): +def equal(x, y, cond=None): """ - **equal** - This layer returns the truth value of :math:`x == y` elementwise. Args: @@ -1458,7 +1457,6 @@ class DynamicRNN(object): Returns: The current timestep in the input sequence. - """ self._assert_in_rnn_block_("step_input") if not isinstance(x, Variable): @@ -1535,8 +1533,7 @@ class DynamicRNN(object): @signature_safe_contextmanager def block(self): """ - The block for user to define operators in RNN. See the class docstring - for more details. + The block for user to define operators in RNN. """ if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") @@ -1640,8 +1637,7 @@ class DynamicRNN(object): dtype(str|numpy.dtype): The data type of the initialized memory. Returns: - the memory variable. - + The memory variable. """ self._assert_in_rnn_block_('memory') self._init_zero_idx_() @@ -1740,7 +1736,7 @@ class DynamicRNN(object): def output(self, *outputs): """ - mark the RNN output variables. + Mark the RNN output variables. Args: outputs: The output variables. diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 09b1b30216..da6c241004 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -24,7 +24,7 @@ from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype from ..layer_helper import LayerHelper __all__ = [ - 'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc', + 'deprecated', 'generate_layer_fn', 'generate_activation_fn', 'autodoc', 'templatedoc' ] @@ -89,6 +89,9 @@ def _generate_doc_string_(op_proto, additional_args_lines=None): buf.write('\n') skip_attrs = OpProtoHolder.generated_op_attr_names() + # attr use_mkldnn and is_test also should not be visible to users. + skip_attrs.add("use_mkldnn") + skip_attrs.add("is_test") for each_attr in op_proto.attrs: if each_attr.name in skip_attrs: @@ -226,7 +229,7 @@ def generate_layer_fn(op_type): return func -def generate_layer_fn_noattr(op_type): +def generate_activation_fn(op_type): """Register the Python layer for an Operator without Attribute. Args: @@ -246,6 +249,7 @@ def generate_layer_fn_noattr(op_type): func.__name__ = op_type func.__doc__ = _generate_doc_string_(op_proto) + return func diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 3dcf9dc069..6b4dc4ac89 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -14,7 +14,7 @@ from __future__ import print_function import os -from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr +from .layer_function_generator import generate_layer_fn, generate_activation_fn from .. 
import core from ..framework import convert_np_dtype_to_dtype_ @@ -53,7 +53,7 @@ globals()['_elementwise_div'] = generate_layer_fn('elementwise_div') __all__ += __activations_noattr__ for _OP in set(__activations_noattr__): - globals()[_OP] = generate_layer_fn_noattr(_OP) + globals()[_OP] = generate_activation_fn(_OP) __all__ += ["uniform_random"] From 07ee40c6e9496025b695721833575addc1e5ff26 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 19 Feb 2019 11:22:04 +0800 Subject: [PATCH 063/117] fix default value. test=develop --- python/paddle/fluid/compiler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index b24cec044f..403ceda87b 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -178,9 +178,9 @@ class CompiledProgram(object): # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. if self._build_strategy.memory_optimize is None: - self._build_strategy.memory_optimize = False if main._is_mem_optimized else True + self._build_strategy.memory_optimize = False if self._program._is_mem_optimized else True if self._build_strategy.enable_inplace is None: - self._build_strategy.enable_inplace = False if main._is_mem_optimized else True + self._build_strategy.enable_inplace = False if self._program._is_mem_optimized else True if self._build_strategy.num_trainers > 1 and trainers_endpoints: assert self._build_strategy.num_trainers == len( From b20a21e299718e0e68e717f9ae98c6cee39d4171 Mon Sep 17 00:00:00 2001 From: liuwei1031 Date: Tue, 19 Feb 2019 03:51:35 +0000 Subject: [PATCH 064/117] fix comments of PR 15529, test=develop --- paddle/fluid/memory/allocation/legacy_allocator.cc | 6 +++--- paddle/fluid/memory/allocation/legacy_allocator.h | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index e983ae327d..cd1c0b6d1a 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -356,7 +356,7 @@ void MemInfo::Minus(const size_t &size) { usage_ -= size; } -uint64_t MemInfo::GetPeakUsage() { return peak_usage_; } +uint64_t MemInfo::GetPeakUsage() const { return peak_usage_; } LegacyMemMonitor::~LegacyMemMonitor() { for (auto &item : gpu_mem_info_) delete item.second; @@ -380,10 +380,10 @@ void LegacyMemMonitor::Minus(const int &device, const size_t &size) { gpu_mem_info_[device]->Minus(size); } -uint64_t LegacyMemMonitor::GetMemUsage(const int &device) { +uint64_t LegacyMemMonitor::GetMemUsage(const int &device) const { return gpu_mem_info_.find(device) == gpu_mem_info_.end() ? 
0 - : gpu_mem_info_[device]->GetPeakUsage(); + : gpu_mem_info_.find(device)->second->GetPeakUsage(); } void LegacyMemMonitor::PrintMemUsage() { diff --git a/paddle/fluid/memory/allocation/legacy_allocator.h b/paddle/fluid/memory/allocation/legacy_allocator.h index ccbc8c70d8..d9bdae153d 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.h +++ b/paddle/fluid/memory/allocation/legacy_allocator.h @@ -27,20 +27,20 @@ namespace allocation { class MemInfo { public: MemInfo() : usage_(0), peak_usage_(0) {} - MemInfo(const MemInfo &) = delete; - MemInfo &operator=(const MemInfo &) = delete; // return a flag to indicate current operation will create a peak point or not bool Add(const size_t &); void Minus(const size_t &); - uint64_t GetPeakUsage(); + uint64_t GetPeakUsage() const; private: /* current memory usage*/ uint64_t usage_; uint64_t peak_usage_; std::mutex mutex_; + + DISABLE_COPY_AND_ASSIGN(MemInfo); }; class LegacyMemMonitor { @@ -56,11 +56,11 @@ class LegacyMemMonitor { void Add(const int &, const size_t &); void Minus(const int &, const size_t &); - uint64_t GetMemUsage(const int &); + uint64_t GetMemUsage(const int &) const; void PrintMemUsage(); - protected: + private: MemUsage gpu_mem_info_; }; From df23a6f894e74975448318f34a70120e05f96a85 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Tue, 19 Feb 2019 05:05:27 +0100 Subject: [PATCH 065/117] Enable cross_entropy operator for a ngraph engine (#15674) * Enable cross_entropy operator for a ngraph engine test=develop * Update tests test=develop * Added PADDLE_ENFORCE for the batch_norm operator test=develop * Update the message about which format are supported right now test=develop --- .../fluid/operators/ngraph/ngraph_bridge.cc | 2 + paddle/fluid/operators/ngraph/ngraph_ops.h | 1 + .../operators/ngraph/ops/batch_norm_op.h | 7 + .../operators/ngraph/ops/cross_entropy_op.h | 145 +++++++++ .../ngraph/test_cross_entropy_ngraph_op.py | 275 ++++++++++++++++++ 5 files changed, 430 insertions(+) create mode 100644 paddle/fluid/operators/ngraph/ops/cross_entropy_op.h create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc index 08d72a5b39..36a2efc0ce 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.cc @@ -36,6 +36,8 @@ std::map("epsilon"); const float momentum = op_attrs.Get("momentum"); + PADDLE_ENFORCE( + data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", + "The BatchNorm operator only supports NHWC/NCHW/NC data format"); + if (data_layout == "NHWC") { x = paddle::platform::Nhwc2Nchw(x); } @@ -110,6 +114,9 @@ void BuildBatchNormGradNode( "BN grap input size needs to be 2 or 4"); PADDLE_ENFORCE_EQ(x_shape.size(), dy_shape.size(), "BN grap input and delta size needs to be equal"); + PADDLE_ENFORCE( + data_layout == "NHWC" || data_layout == "NCHW" || data_layout == "NC", + "The BatchNorm operator only supports NHWC/NCHW/NC data format"); if (x_shape.size() == 2) { x = std::make_shared( diff --git a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h new file mode 100644 index 0000000000..f88a2cb941 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h @@ -0,0 +1,145 @@ +/*Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include + +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +void BuildCrossEntropyNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); + auto label_shape = label->get_shape(); + auto x_shape = x->get_shape(); + auto label_rank = label_shape.size(); + auto x_rank = x_shape.size(); + std::shared_ptr x_2d = x, label_2d = label; + auto label_2d_shape = label_shape, x_2d_shape = x_shape; + + if (label_rank > 2) { + label_2d_shape = paddle::platform::FlattenTo2d(label_shape, label_rank - 1); + label_2d = paddle::platform::NgReshaper(label, label_2d_shape); + } + if (x_rank > 2) { + x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_rank - 1); + x_2d = paddle::platform::NgReshaper(x, x_2d_shape); + } + + auto batch_size = x_2d_shape.at(0); + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + const bool is_soft_label = op_attrs.Get("soft_label"); + + std::shared_ptr node_1_hot = label_2d; + if (!is_soft_label) { + auto label_1d = paddle::platform::NgReshaper( + label_2d, ngraph::Shape{label_2d_shape.at(0)}); + node_1_hot = std::make_shared(label_1d, x_2d_shape, 1); + } + if (x->get_element_type() != node_1_hot->get_element_type()) { + node_1_hot = std::make_shared(node_1_hot, + x->get_element_type()); + } + + auto node_log = std::make_shared(x_2d); + auto high_clip = ngraph::op::Constant::create(node_log->get_element_type(), + node_log->get_shape(), {1e20}); + auto low_clip = ngraph::op::Constant::create(node_log->get_element_type(), + node_log->get_shape(), {-1e20}); + auto node_min = std::make_shared(node_log, high_clip); + auto node_max = std::make_shared(node_min, low_clip); + auto node_mul = node_1_hot * node_log; + auto node_sum = + std::make_shared(node_mul, ngraph::AxisSet{1}); + auto node_neg = std::make_shared(node_sum); + auto xe = + paddle::platform::NgReshaper(node_neg, ngraph::Shape{batch_size, 1}); + + if (!is_soft_label) { + auto ignore_index = op_attrs.Get("ignore_index"); + auto ignore_node = ngraph::op::Constant::create( + label->get_element_type(), label_2d_shape, {ignore_index}); + auto not_equal_node = + std::make_shared(label_2d, ignore_node); + auto mask = std::make_shared(not_equal_node, + xe->get_element_type()); + xe = xe * mask; + } + + paddle::platform::SetOutputNode(op, "Y", xe, ngb_node_map); +} + +void BuildCrossEntropyGradNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + const bool is_soft_label = op_attrs.Get("soft_label"); + + auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map); + auto label = paddle::platform::GetInputNode(op, "Label", ngb_node_map); + auto dy = paddle::platform::GetInputNode(op, 
"Y@GRAD", ngb_node_map); + auto x_shape = x->get_shape(); + auto rank = x_shape.size(); + + std::shared_ptr mask; + if (!is_soft_label) { + auto label_shape = label->get_shape(); + label_shape.pop_back(); + label = paddle::platform::NgReshaper(label, label_shape); + + auto ignore_index = op_attrs.Get("ignore_index"); + auto ignore_node = ngraph::op::Constant::create( + label->get_element_type(), label_shape, {ignore_index}); + auto not_equal_node = + std::make_shared(label, ignore_node); + mask = std::make_shared(not_equal_node, + x->get_element_type()); + mask = std::make_shared(mask, x_shape, + ngraph::AxisSet{rank - 1}); + + label = std::make_shared(label, x_shape, rank - 1); + } + + auto dy_shape = dy->get_shape(); + dy_shape.pop_back(); + auto dy_reshape = paddle::platform::NgReshaper(dy, dy_shape); + auto dy_bcast = std::make_shared( + dy_reshape, x_shape, ngraph::AxisSet{rank - 1}); + if (x->get_element_type() != label->get_element_type()) { + label = std::make_shared(label, x->get_element_type()); + } + + auto xe_grad = -label * dy_bcast / x; + + if (!is_soft_label) { + xe_grad = xe_grad * mask; + } + + paddle::platform::SetOutputNode(op, "X@GRAD", xe_grad, ngb_node_map); +} +} // namespace ngraphs +} // namespace operators +} // namespace paddle diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py new file mode 100644 index 0000000000..9a185eb97c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py @@ -0,0 +1,275 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import paddle.fluid.core as core +from paddle.fluid.tests.unittests.op_test import OpTest, randomize_probability + + +class TestCrossEntropyOp(OpTest): + """Test cross-entropy with discrete one-hot labels. 
+ """ + + def setUp(self): + self.op_type = "cross_entropy" + self.soft_label = False + self.ignore_index = -100 + self.dtype = np.float64 + self.batch_size = 30 + self.class_num = 10 + self._cpu_only = True + + self.init_dtype_type() + self.init_attr_type() + self.init_bs_class_num() + self.init_x() + self.init_label() + self.get_cross_entropy() + + self.inputs = {"X": self.x, "Label": self.label} + self.outputs = {"Y": self.cross_entropy} + self.attrs = { + "soft_label": self.soft_label, + "ignore_index": self.ignore_index + } + + def init_x(self): + self.x = randomize_probability( + self.batch_size, self.class_num, dtype=self.dtype) + + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + for i in range(self.x.shape[0])], + dtype="float64") + + def init_attr_type(self): + pass + + def init_dtype_type(self): + pass + + def init_bs_class_num(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Y", numeric_grad_delta=0.001) + + +class TestCrossEntropyOp2(TestCrossEntropyOp): + """Test cross-entropy with vectorized soft labels. + """ + + def init_label(self): + self.label = np.random.uniform( + 0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype) + self.label /= self.label.sum(axis=1, keepdims=True) + + def get_cross_entropy(self): + self.cross_entropy = (-self.label * np.log(self.x)).sum( + axis=1, keepdims=True).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 37 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp3(TestCrossEntropyOp): + """Test cross-entropy with vectorized one-hot representation of labels. + """ + + def init_label(self): + self.label_index = np.random.randint(0, self.class_num, + (self.batch_size)) + self.label = np.zeros(self.x.shape).astype(self.dtype) + self.label[np.arange(self.batch_size), self.label_index] = 1 + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label_index[i]])] + for i in range(self.x.shape[0])]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.batch_size = 5 + self.class_num = 17 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp4(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with discrete one-hot labels. 
+ """ + + def init_x(self): + self.shape = [10, 2, 4] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_2d = np.random.randint( + 0, self.class_num, (self.ins_num, 1), dtype="int64") + self.label = self.label_2d.reshape(self.shape + [1]) + + def get_cross_entropy(self): + cross_entropy_2d = np.asmatrix( + [[-np.log(self.X_2d[i][self.label_2d[i][0]])] + for i in range(self.X_2d.shape[0])]).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) + + def init_attr_type(self): + self.soft_label = False + + def init_dtype_type(self): + self.dtype = np.float64 + + def init_bs_class_num(self): + self.class_num = 10 + + +class TestCrossEntropyOp5(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with vectorized soft labels. + """ + + def init_x(self): + self.shape = [4, 3] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_2d = np.random.uniform( + 0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype) + self.label_2d /= self.label_2d.sum(axis=1, keepdims=True) + self.label = self.label_2d.reshape(self.shape + [self.class_num]) + + def get_cross_entropy(self): + cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum( + axis=1, keepdims=True).astype(self.dtype) + self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + + [1]) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.class_num = 37 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp6(TestCrossEntropyOp): + """Test high rank tensor cross-entropy with vectorized one-hot representation of labels. + """ + + def init_x(self): + self.shape = [4, 3, 2] + self.ins_num = np.prod(np.array(self.shape)) + self.X_2d = randomize_probability(self.ins_num, + self.class_num).astype(self.dtype) + self.x = self.X_2d.reshape(self.shape + [self.class_num]) + + def init_label(self): + self.label_index_2d = np.random.randint( + 0, self.class_num, (self.ins_num), dtype="int64") + label_2d = np.zeros(self.X_2d.shape) + label_2d[np.arange(self.ins_num), self.label_index_2d] = 1 + self.label = label_2d.reshape(self.shape + [self.class_num]).astype( + self.dtype) + + def get_cross_entropy(self): + cross_entropy_2d = np.asmatrix( + [[-np.log(self.X_2d[i][self.label_index_2d[i]])] + for i in range(self.X_2d.shape[0])]) + self.cross_entropy = np.array(cross_entropy_2d).reshape( + self.shape + [1]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = True + + def init_dtype_type(self): + self.dtype = np.float32 + + def init_bs_class_num(self): + self.class_num = 17 + + def test_check_grad(self): + self.check_grad( + ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) + + +class TestCrossEntropyOp7(TestCrossEntropyOp): + """Test cross-entropy with ignore index. 
+ """ + + def init_label(self): + self.label = np.random.randint( + 0, self.class_num, (self.batch_size, 1), dtype="int64") + + def get_cross_entropy(self): + self.cross_entropy = np.asmatrix( + [[-np.log(self.x[i][self.label[i][0]])] + if self.label[i][0] != self.ignore_index else [0] + for i in range(self.x.shape[0])]).astype(self.dtype) + + def init_attr_type(self): + self.soft_label = False + self.ignore_index = 3 + + def init_dtype_type(self): + self.dtype = np.float64 + + def init_bs_class_num(self): + self.batch_size = 30 + self.class_num = 10 + + +if __name__ == "__main__": + unittest.main() From 9ae764c11d2320be45274c5159b4bc31877b7346 Mon Sep 17 00:00:00 2001 From: chengduozh Date: Tue, 19 Feb 2019 12:37:25 +0800 Subject: [PATCH 066/117] fix doc test=develop --- python/paddle/fluid/layers/nn.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d72921dc00..1a7d076835 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -8744,18 +8744,17 @@ def slice(input, axes, starts, ends): return out -@templatedoc() def shape(input): """ **Shape Layer** - Return the shape of the input. + Get the shape of the input. Args: input (Variable): The input variable. Returns: - out (Variable): The shape of the input variable. + Variable: The shape of the input variable. Examples: .. code-block:: python From 4c7b6e2e6762ba279741964d67dbb057045d43ef Mon Sep 17 00:00:00 2001 From: liuwei1031 Date: Tue, 19 Feb 2019 05:23:38 +0000 Subject: [PATCH 067/117] fix comment, test=develop --- paddle/fluid/memory/allocation/legacy_allocator.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index cd1c0b6d1a..1936f9d4cd 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -383,7 +383,7 @@ void LegacyMemMonitor::Minus(const int &device, const size_t &size) { uint64_t LegacyMemMonitor::GetMemUsage(const int &device) const { return gpu_mem_info_.find(device) == gpu_mem_info_.end() ? 0 - : gpu_mem_info_.find(device)->second->GetPeakUsage(); + : gpu_mem_info_.at(device)->GetPeakUsage(); } void LegacyMemMonitor::PrintMemUsage() { From d5090c892d609bf1d394d3c755cc4bafb80ba6f7 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Tue, 19 Feb 2019 15:22:25 +0800 Subject: [PATCH 068/117] polish code test=develop --- paddle/fluid/framework/details/build_strategy.cc | 2 +- .../details/multi_devices_graph_pass.cc | 16 +++++++--------- .../details/parallel_ssa_graph_executor.cc | 3 ++- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index 45c2c73415..3a5e41ef3c 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -34,7 +34,7 @@ namespace details { static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) { // Should fix the allreduce op order if scheduling // them in multiple threads or processes to avoid hang. - // NOTE: ParallelExecutor would execute this pass on each graph, so + // NOTE: ParallelGraph would execute this pass on each graph, so // don't need to append it here. 
return (!strategy.enable_sequential_execution_ && strategy.num_trainers_ > 1) && diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 27bc771814..3c0a8d7020 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -389,8 +389,8 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp( OpHandleBase *op_handle = nullptr; auto append_allreduce_op = [&]( - std::vector &scopes, - std::vector &places) -> OpHandleBase * { + const std::vector &scopes, + const std::vector &places) -> OpHandleBase * { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), @@ -407,13 +407,11 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp( op_handle = append_allreduce_op(local_scopes_, places_); for (size_t i = 0; i < places_.size(); ++i) { - auto p = places_[i]; - std::vector ss{local_scopes_[i]}; - std::vector ps{p}; - if (strategy_.enable_parallel_graph_) - op_handle = append_allreduce_op(ss, ps); + if (strategy_.enable_parallel_graph_) { + op_handle = append_allreduce_op({local_scopes_[i]}, {places_[i]}); + } - SetCommunicationContext(op_handle, p); + SetCommunicationContext(op_handle, places_[i]); auto &vars = result->Get(kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); auto &prev_grad = vars.back(); @@ -421,7 +419,7 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp( auto var = new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable), - vars.size(), i, og, p); + vars.size(), i, og, places_[i]); vars.emplace_back(var); op_handle->AddOutput(var); } diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index c36618016b..3740b795fa 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -32,8 +32,9 @@ ParallelSSAGraphExecutor::SeparateMultiDevicesGraph( g->Set(kGraphDepVars, new GraphDepVars); g->Set(kGraphOps, new GraphOps); } + auto op_handles = ir::FilterByNodeWrapper(*graph); - for (auto &op : graph->Get(kGraphOps)) { + for (auto &op : op_handles) { auto &dev_ctx = op->DeviceContext(); auto &p = dev_ctx.begin()->first; int dev_id = boost::get(p).device; From 209b35576237ef20e0cc1835bc784e0dea03735a Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Tue, 19 Feb 2019 07:15:51 +0000 Subject: [PATCH 069/117] fix many warning test=develop --- paddle/fluid/platform/device_context.cc | 2 +- paddle/fluid/platform/enforce.h | 62 ++++++++++++++++++++++--- 2 files changed, 56 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 2493fb71c0..ed0dbdeb13 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -291,7 +291,7 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place) if (dynload::HasCUDNN()) { auto local_cudnn_version = cudnn_dso_ver / 100; auto compile_cudnn_version = CUDNN_VERSION / 100; - if (local_cudnn_version < compile_cudnn_version) { + if (local_cudnn_version < static_cast(compile_cudnn_version)) { LOG_FIRST_N(WARNING, 1) << "WARNING: device: " << place_.device << ". 
The installed Paddle is compiled with CUDNN " diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index d32f9c8667..54ad18a8e4 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -31,6 +31,8 @@ limitations under the License. */ #include #include #include +#include +#include #include "glog/logging.h" #include "paddle/fluid/platform/macros.h" @@ -280,16 +282,62 @@ inline void throw_on_error(ncclResult_t stat, const std::string& msg) { } \ } while (0) -#define __PADDLE_BINARY_COMPARE(__VAL0, __VAL1, __CMP, __INV_CMP, ...) \ +namespace details { +template +inline constexpr bool IsArithmetic() { + return std::is_arithmetic::value; +} + +template +struct TypeConverterImpl { + using Type1 = typename std::common_type::type; + using Type2 = Type1; +}; + +template +struct TypeConverterImpl { + using Type1 = T1; + using Type2 = T2; +}; + +template +struct TypeConverter { + private: + static constexpr bool kIsArithmetic = + IsArithmetic() && IsArithmetic(); + + public: + using Type1 = typename TypeConverterImpl::Type1; + using Type2 = typename TypeConverterImpl::Type2; +}; + +template +using CommonType1 = typename std::add_lvalue_reference< + typename std::add_const::Type1>::type>::type; + +template +using CommonType2 = typename std::add_lvalue_reference< + typename std::add_const::Type2>::type>::type; +} // namespace details + +#define __PADDLE_BINARY_COMPARE(__VAL1, __VAL2, __CMP, __INV_CMP, ...) \ do { \ - auto __cond1__ = (__VAL0); \ - auto __cond2__ = (__VAL1); \ - if (UNLIKELY(!((__cond1__)__CMP(__cond2__)))) { \ + auto __val1 = (__VAL1); \ + auto __val2 = (__VAL2); \ + using __TYPE1__ = decltype(__val1); \ + using __TYPE2__ = decltype(__val2); \ + using __COMMON_TYPE1__ = \ + ::paddle::platform::details::CommonType1<__TYPE1__, __TYPE2__>; \ + using __COMMON_TYPE2__ = \ + ::paddle::platform::details::CommonType2<__TYPE1__, __TYPE2__>; \ + bool __is_not_error = (static_cast<__COMMON_TYPE1__>(__val1))__CMP( \ + static_cast<__COMMON_TYPE2__>(__val2)); \ + if (UNLIKELY(!__is_not_error)) { \ PADDLE_THROW("Enforce failed. Expected %s " #__CMP \ " %s, but received %s:%s " #__INV_CMP " %s:%s.\n%s", \ - #__VAL0, #__VAL1, #__VAL0, \ - ::paddle::string::to_string(__cond1__), #__VAL1, \ - ::paddle::string::to_string(__cond2__), \ + #__VAL1, #__VAL2, #__VAL1, \ + ::paddle::string::to_string(__val1), #__VAL2, \ + ::paddle::string::to_string(__val2), \ ::paddle::string::Sprintf(__VA_ARGS__)); \ } \ } while (0) From 9c92d0304fd34236d0b123fb5def0725596865c3 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 19 Feb 2019 16:32:56 +0800 Subject: [PATCH 070/117] fix default value. 
test=develop --- paddle/fluid/framework/details/memory_optimize_pass.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index b35b967c72..93d08649db 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -235,7 +235,9 @@ void MemoryOptimizePass::RenameVarInGraphDesc(const std::string& var, auto* op_desc = op->Op(); op_desc->RenameInput(var, cache_var); op_desc->RenameOutput(var, cache_var); - if (op_desc->Block()->HasVar(var)) op_desc->Block()->RemoveVar(var); + if (op_desc->Block() != nullptr && op_desc->Block()->HasVar(var)) { + op_desc->Block()->RemoveVar(var); + } op_desc->Flush(); } } From 089d262c41a36d9fdd4fd61ecf3fda968fedc71a Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 19 Feb 2019 16:39:57 +0800 Subject: [PATCH 071/117] fix default value. test=develop --- paddle/fluid/framework/details/memory_optimize_helper.cc | 8 +++++++- paddle/fluid/framework/details/memory_optimize_pass.cc | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 6126c168cc..db4e805bb6 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -129,7 +129,13 @@ size_t NodeSize(const VarDesc& node) { } size_t NodeSize(ir::Node* n) { - auto* desc = FindVarDescInBlock(n); + VarDesc* desc = nullptr; + // some op do not have block pointer + if (n->inputs[0]->Op() != nullptr) { + desc = FindVarDescInBlock(n); + } else { + desc = n->Var(); + } return NodeSize(*desc); } diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index 93d08649db..d45a43d851 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -194,7 +194,8 @@ void MemoryOptimizePass::SubGraphOptimize(OpDesc* op_desc) const { // effect. Because it is a single op in graph. No need to // update the ir nodes. sub_op_desc->Rename(var->Name(), cache->Name()); - if (sub_op_desc->Block()->HasVar(var->Name())) { + if (sub_op_desc->Block() != nullptr && + sub_op_desc->Block()->HasVar(var->Name())) { sub_op_desc->Block()->RemoveVar(var->Name()); } } From 6deb17ed8c5706835caffae94dcfa968d2151acb Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 19 Feb 2019 16:59:12 +0800 Subject: [PATCH 072/117] fix default value. test=develop --- paddle/fluid/framework/details/memory_optimize_pass.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc index d45a43d851..fd02bc4697 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/details/memory_optimize_pass.cc @@ -236,8 +236,12 @@ void MemoryOptimizePass::RenameVarInGraphDesc(const std::string& var, auto* op_desc = op->Op(); op_desc->RenameInput(var, cache_var); op_desc->RenameOutput(var, cache_var); - if (op_desc->Block() != nullptr && op_desc->Block()->HasVar(var)) { + if (op_desc->Block() != nullptr) { op_desc->Block()->RemoveVar(var); + } else { + LOG(WARNING) << "op " << op->Name() << " not know its block." + << "Is the op_desc created without block pointer? 
" + << "Can not find " << var << " in Block(0)"; } op_desc->Flush(); } From 4b193db14c4862569c345e4cf7970418dbf01073 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Tue, 19 Feb 2019 17:17:36 +0800 Subject: [PATCH 073/117] polish code test=develop --- paddle/fluid/framework/details/multi_devices_graph_pass.cc | 6 ++++++ paddle/fluid/framework/details/multi_devices_helper.h | 6 ------ .../fluid/framework/details/parallel_ssa_graph_executor.cc | 3 --- .../fluid/framework/details/parallel_ssa_graph_executor.h | 2 -- paddle/fluid/framework/ir/graph.h | 3 +++ 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 3c0a8d7020..7d1e63f368 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -36,6 +36,11 @@ namespace framework { namespace details { namespace { +// TODO(panyx0718): Clean this up as well. +// all operators. NOTE that even we use a vector here, the operators is +// unordered. +typedef std::vector GraphOps; +const char kGraphOps[] = "ops"; bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { return boost::get( @@ -221,6 +226,7 @@ std::unique_ptr MultiDevSSAGraphBuilderBase::ApplyImpl( * Only variables should be the leaves of graph. */ AddOutputToLeafOps(&result); + result.Erase(kGraphOps); return graph; } diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h index 5331b750eb..9afbb91005 100644 --- a/paddle/fluid/framework/details/multi_devices_helper.h +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -44,12 +44,6 @@ const char kGraphVars[] = "vars"; typedef std::unordered_set GraphDepVars; const char kGraphDepVars[] = "dep_vars"; -// TODO(panyx0718): Clean this up as well. -// all operators. NOTE that even we use a vector here, the operators is -// unordered. 
-typedef std::vector GraphOps; -const char kGraphOps[] = "ops"; - } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index 3740b795fa..4c8f69c68c 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -30,7 +30,6 @@ ParallelSSAGraphExecutor::SeparateMultiDevicesGraph( auto &g = graphs.back(); g->Set(kGraphVars, new GraphVars(1UL)); g->Set(kGraphDepVars, new GraphDepVars); - g->Set(kGraphOps, new GraphOps); } auto op_handles = ir::FilterByNodeWrapper(*graph); @@ -38,9 +37,7 @@ ParallelSSAGraphExecutor::SeparateMultiDevicesGraph( auto &dev_ctx = op->DeviceContext(); auto &p = dev_ctx.begin()->first; int dev_id = boost::get(p).device; - auto &dev_ops = graphs[dev_id]->Get(kGraphOps); auto &dev_dummys = graphs[dev_id]->Get(kGraphDepVars); - dev_ops.emplace_back(op); graphs[dev_id]->AddNode(graph->RemoveNode(op->Node()).release()); for (auto &var : op->Inputs()) { diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h index f59305bf98..1c35d45fdd 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h @@ -14,8 +14,6 @@ #pragma once -#include -#include #include #include diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h index d5b3782f62..296f3b8396 100644 --- a/paddle/fluid/framework/ir/graph.h +++ b/paddle/fluid/framework/ir/graph.h @@ -28,6 +28,9 @@ namespace paddle { namespace framework { namespace details { + +// This attr is not recommended, because the graph should not dependence +// the program once it is built. 
constexpr char kAllOpDescs[] = "all_op_descs"; } // namespace details From 9b8e0e2f17418f19a52de1db5caa588a1c7c9e9f Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Tue, 19 Feb 2019 18:56:46 +0800 Subject: [PATCH 074/117] fix enforce_test test=develop --- paddle/fluid/platform/enforce_test.cc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 1091badae5..91ce55820f 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -235,7 +235,13 @@ TEST(ENFORCE_USER_DEFINED_CLASS, EQ) { TEST(ENFORCE_USER_DEFINED_CLASS, NE) { Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}}; - ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet); + bool caught_exception = false; + try { + PADDLE_ENFORCE_EQ(a, b); + } catch (paddle::platform::EnforceNotMet&) { + caught_exception = true; + } + EXPECT_TRUE(caught_exception); } TEST(EOF_EXCEPTION, THROW_EOF) { From 794b90c93ffa081c1ed0b6cce1c49f47f18160e3 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Tue, 19 Feb 2019 12:03:45 +0000 Subject: [PATCH 075/117] for backward compatibility --- paddle/fluid/API.spec | 2 +- python/paddle/fluid/optimizer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 03478a932c..a4c426a336 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -427,7 +427,7 @@ paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learnin paddle.fluid.optimizer.MomentumOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) paddle.fluid.optimizer.MomentumOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) -paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.1)) +paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'regularization', 'name', 'initial_accumulator_value'], varargs=None, keywords=None, defaults=(1e-06, None, None, 0.0)) paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None) paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)) paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index ce5e5c4f37..61dedbe93c 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -663,7 +663,7 @@ class AdagradOptimizer(Optimizer): epsilon=1.0e-6, regularization=None, name=None, - initial_accumulator_value=0.1): + initial_accumulator_value=0.0): assert learning_rate is not None assert epsilon is not None super(AdagradOptimizer, self).__init__( From 
e1c707fe9cee4b9ad15c635b1130b73450983412 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Tue, 19 Feb 2019 21:00:58 +0800 Subject: [PATCH 076/117] fix warnings (#15790) * fix warnings test=develop * fix enforce test test=develop --- .../framework/details/broadcast_op_handle.cc | 2 +- .../details/data_balance_op_handle.cc | 2 +- .../framework/details/fuse_vars_op_handle.cc | 2 +- .../framework/details/reduce_op_handle.cc | 2 +- .../fluid/framework/ir/conv_bn_fuse_pass.cc | 2 +- .../ir/fuse_relu_depthwise_conv_pass.cc | 6 +-- .../framework/ir/graph_pattern_detector.cc | 4 +- paddle/fluid/inference/api/api.cc | 2 +- .../tests/api/analyzer_seq_pool1_tester.cc | 4 +- paddle/fluid/operators/attention_lstm_op.cc | 2 +- .../operators/controlflow/get_places_op.cc | 2 +- paddle/fluid/operators/crf_decoding_op.cc | 4 +- .../detection/anchor_generator_op.cc | 6 +-- paddle/fluid/operators/fc_op.cc | 2 +- .../fused/fused_embedding_seq_pool_op.h | 3 +- .../fused/fusion_repeated_fc_relu_op.cc | 4 +- .../fused/fusion_seqexpand_concat_fc_op.cc | 2 +- .../fused/fusion_seqpool_concat_op.cc | 2 +- .../fused/fusion_squared_mat_sub_op.cc | 2 +- paddle/fluid/operators/layer_norm_op.cc | 4 +- paddle/fluid/operators/linear_chain_crf_op.cc | 8 ++-- .../sequence_ops/sequence_enumerate_op.cc | 4 +- .../sequence_ops/sequence_expand_op.cc | 7 ++-- paddle/fluid/platform/enforce_test.cc | 41 +++++++++---------- 24 files changed, 60 insertions(+), 59 deletions(-) diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index 89d626eddd..c42a691be2 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -30,7 +30,7 @@ void BroadcastOpHandle::RunImpl() { VarHandle *in_var_handle; { auto in_var_handles = DynamicCast(inputs_); - PADDLE_ENFORCE_EQ(in_var_handles.size(), 1, + PADDLE_ENFORCE_EQ(in_var_handles.size(), 1UL, "The number of input should be one."); in_var_handle = in_var_handles[0]; } diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc index 48dcc52623..c9b52b6820 100644 --- a/paddle/fluid/framework/details/data_balance_op_handle.cc +++ b/paddle/fluid/framework/details/data_balance_op_handle.cc @@ -86,7 +86,7 @@ std::vector> DataBalanceOpHandle::GetBalancePlan( } void DataBalanceOpHandle::RunImpl() { - PADDLE_ENFORCE_GT(places_.size(), 1, + PADDLE_ENFORCE_GT(places_.size(), 1UL, "Data balance can only be enabled when the number of " "places to run larger than 1."); auto in_var_handles = DynamicCast(this->Inputs()); diff --git a/paddle/fluid/framework/details/fuse_vars_op_handle.cc b/paddle/fluid/framework/details/fuse_vars_op_handle.cc index d65b092069..14292c0a5d 100644 --- a/paddle/fluid/framework/details/fuse_vars_op_handle.cc +++ b/paddle/fluid/framework/details/fuse_vars_op_handle.cc @@ -23,7 +23,7 @@ void FuseVarsOpHandle::RunImpl() { auto in_var_handles = DynamicCast(this->Inputs()); auto out_var_handles = DynamicCast(this->Outputs()); - PADDLE_ENFORCE_EQ(in_var_handles.size(), 0); + PADDLE_ENFORCE_EQ(in_var_handles.size(), 0UL); PADDLE_ENFORCE_EQ(out_var_handles.size() - 1, inputs_numel_.size(), ""); auto scope = local_scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index ee4c8a6ecf..ae76fad450 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ 
b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -153,7 +153,7 @@ void ReduceOpHandle::RunImpl() { { auto out_var_handles = DynamicCast(outputs_); - PADDLE_ENFORCE_EQ(out_var_handles.size(), 1, + PADDLE_ENFORCE_EQ(out_var_handles.size(), 1UL, "The number of output should be one."); out_var_handle = out_var_handles.front(); } diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc index 846a14e365..04765dd144 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc @@ -169,7 +169,7 @@ std::unique_ptr ConvBNFusePass::ApplyImpl( if (has_bias && conv->Op()->Input("Bias").size() > 0) { // reuse existing conv bias node auto conv_bias_names = conv->Op()->Input("Bias"); - PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1); + PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL); auto* conv_bias_var = scope->FindVar(conv_bias_names[0]); auto* conv_bias_tensor = conv_bias_var->GetMutable(); PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(), diff --git a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc index 0d94008ea8..fe844caed2 100644 --- a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc +++ b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc @@ -111,7 +111,7 @@ std::unique_ptr FuseReluDepthwiseConvPass::FuseReluDepthwiseConv( xg_var = subgraph.at(xg)->Var(); } - PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1); + PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(layer_op->Input("Input")[0], y_var->Name()); layer_op->SetInput("Input", {x_var->Name()}); subgraph.at(layer)->inputs.push_back(subgraph.at(x)); @@ -119,13 +119,13 @@ std::unique_ptr FuseReluDepthwiseConvPass::FuseReluDepthwiseConv( VLOG(4) << "replace " << y_var->Name() << " -> " << x_var->Name(); if (!only_forward) { - PADDLE_ENFORCE_EQ(layer_g_op->Input("Input").size(), 1); + PADDLE_ENFORCE_EQ(layer_g_op->Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(layer_g_op->Input("Input")[0], y_var->Name()); layer_g_op->SetInput("Input", {x_var->Name()}); subgraph.at(layer_g)->inputs.push_back(subgraph.at(x)); subgraph.at(x)->outputs.push_back(subgraph.at(layer_g)); - PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input")).size(), 1); + PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input")).size(), 1UL); PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input"))[0], yg_var->Name()); layer_g_op->SetOutput(GradVarName("Input"), {xg_var->Name()}); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 9ea0729e1f..c0c34d186b 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -38,7 +38,7 @@ size_t PDPattern::id_ = 0UL; PDNode *PDPattern::NewNode(const std::string &name) { if (!name.empty()) { - PADDLE_ENFORCE_EQ(node_map_.count(name), 0, + PADDLE_ENFORCE_EQ(node_map_.count(name), 0UL, "PDNode's name should be unique, get duplicate [%s]", name); } @@ -51,7 +51,7 @@ PDNode *PDPattern::NewNode(const std::string &name) { PDNode *PDPattern::NewNode(PDNode::teller_t &&teller, const std::string &name) { if (!name.empty()) { - PADDLE_ENFORCE_EQ(node_map_.count(name), 0, + PADDLE_ENFORCE_EQ(node_map_.count(name), 0UL, "PDNode's name should be unique, get duplicate [%s]", name); } diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc index 6cd18277d6..f83537f064 100644 
--- a/paddle/fluid/inference/api/api.cc +++ b/paddle/fluid/inference/api/api.cc @@ -92,7 +92,7 @@ void PaddleBuf::Reset(void *data, size_t length) { void PaddleBuf::Free() { if (memory_owned_ && data_) { - PADDLE_ENFORCE_GT(length_, 0); + PADDLE_ENFORCE_GT(length_, 0UL); free(static_cast(data_)); data_ = nullptr; length_ = 0; diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc index dd953e0dcc..bd0059e184 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_pool1_tester.cc @@ -56,14 +56,14 @@ struct DataRecord { std::vector slot_data; split_to_float(data[1], ' ', &slot_data); std::string name = data[0]; - PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0, + PADDLE_ENFORCE_EQ(slot_data.size() % 11, 0UL, "line %d, %s should be divisible", num_lines, name); datasets[name].emplace_back(std::move(slot_data)); } num_samples = num_lines / num_slots; PADDLE_ENFORCE_EQ(num_samples * num_slots, static_cast(num_lines), "num samples should be divisible"); - PADDLE_ENFORCE_GT(num_samples, 0); + PADDLE_ENFORCE_GT(num_samples, 0UL); } void Prepare(int bs) { diff --git a/paddle/fluid/operators/attention_lstm_op.cc b/paddle/fluid/operators/attention_lstm_op.cc index b6996be4b0..912ec79910 100644 --- a/paddle/fluid/operators/attention_lstm_op.cc +++ b/paddle/fluid/operators/attention_lstm_op.cc @@ -293,7 +293,7 @@ class AttentionLSTMKernel : public framework::OpKernel { int len = x_lod[0][i + 1] - x_lod[0][i]; max_seq_len = max_seq_len < len ? len : max_seq_len; } - PADDLE_ENFORCE_EQ(x_lod.size(), 1, "Input(X)'s lod size must be 1."); + PADDLE_ENFORCE_EQ(x_lod.size(), 1UL, "Input(X)'s lod size must be 1."); PADDLE_ENFORCE_EQ(c0->dims()[0], N, "C0 dims should be %d x %d.", N, D); fc_out->Resize({max_seq_len, 1}); diff --git a/paddle/fluid/operators/controlflow/get_places_op.cc b/paddle/fluid/operators/controlflow/get_places_op.cc index db6ff78256..1a157688f3 100644 --- a/paddle/fluid/operators/controlflow/get_places_op.cc +++ b/paddle/fluid/operators/controlflow/get_places_op.cc @@ -52,7 +52,7 @@ class GetPlacesOp : public framework::OperatorBase { device_count = is_gpu ? CUDADevCount() : std::thread::hardware_concurrency(); } - PADDLE_ENFORCE_NE(device_count, 0, "Cannot indicate %s device count", + PADDLE_ENFORCE_NE(device_count, 0UL, "Cannot indicate %s device count", is_gpu ? 
"GPU" : "CPU"); auto out_var_name = Output("Out"); diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc index 81c9e9e543..e053ae5773 100644 --- a/paddle/fluid/operators/crf_decoding_op.cc +++ b/paddle/fluid/operators/crf_decoding_op.cc @@ -84,12 +84,12 @@ class CRFDecodingOp : public framework::OperatorWithKernel { "Output(ViterbiPath) should be not null."); auto emission_dims = ctx->GetInputDim("Emission"); - PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(emission_dims.size(), 2, "The Input(Emission) should be a 2-D tensor."); PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed."); auto transition_dims = ctx->GetInputDim("Transition"); - PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(transition_dims.size(), 2, "The Input(Transition) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( transition_dims[0] - 2, transition_dims[1], diff --git a/paddle/fluid/operators/detection/anchor_generator_op.cc b/paddle/fluid/operators/detection/anchor_generator_op.cc index f2984d1af2..4a333b559f 100644 --- a/paddle/fluid/operators/detection/anchor_generator_op.cc +++ b/paddle/fluid/operators/detection/anchor_generator_op.cc @@ -85,7 +85,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { " For instance, the anchor size of 64 means the area of this anchor " "equals to 64**2.") .AddCustomChecker([](const std::vector& anchor_sizes) { - PADDLE_ENFORCE_GT(anchor_sizes.size(), 0, + PADDLE_ENFORCE_GT(anchor_sizes.size(), 0UL, "Size of anchor_sizes must be at least 1."); for (size_t i = 0; i < anchor_sizes.size(); ++i) { PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0, @@ -103,7 +103,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { "(vector) List of variances to be used " "in box regression deltas") .AddCustomChecker([](const std::vector& variances) { - PADDLE_ENFORCE_EQ(variances.size(), 4, + PADDLE_ENFORCE_EQ(variances.size(), 4UL, "Must and only provide 4 variance."); for (size_t i = 0; i < variances.size(); ++i) { PADDLE_ENFORCE_GT(variances[i], 0.0, @@ -117,7 +117,7 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(std::vector(2, 16.0)) .AddCustomChecker([](const std::vector& stride) { PADDLE_ENFORCE_EQ( - stride.size(), 2, + stride.size(), 2UL, "Must and only provide 2 stride for width and height."); for (size_t i = 0; i < stride.size(); ++i) { PADDLE_ENFORCE_GT(stride[i], 0.0, diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc index 38e57a41ed..eb4617a935 100644 --- a/paddle/fluid/operators/fc_op.cc +++ b/paddle/fluid/operators/fc_op.cc @@ -47,7 +47,7 @@ void FCOp::InferShape(framework::InferShapeContext* ctx) const { PADDLE_ENFORCE(in_dims.size() == 2 || in_dims.size() == 4, "Fully Connected input should be 2-D or 4-D tensor."); } - PADDLE_ENFORCE_EQ(w_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Fully Connected input should be 2-D tensor."); int in_num_col_dims = ctx->Attrs().Get("in_num_col_dims"); PADDLE_ENFORCE_GT( diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h index 92345b3c0e..33a1b47d15 100644 --- a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.h @@ -47,10 +47,11 @@ struct EmbeddingVSumFunctor { auto *output = output_t->mutable_data(context.GetPlace()); PADDLE_ENFORCE_LE(table_width * idx_width, out_width); + 
PADDLE_ENFORCE_GT(ids_lod.size(), 1UL); jit::emb_seq_pool_attr_t attr(table_height, table_width, 0, idx_width, out_width, jit::SeqPoolType::kSum); - for (int64_t i = 0; i != ids_lod.size() - 1; ++i) { + for (size_t i = 0; i != ids_lod.size() - 1; ++i) { attr.index_height = ids_lod[i + 1] - ids_lod[i]; auto emb_seqpool = jit::Get, platform::CPUPlace>(attr); diff --git a/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc b/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc index e9e2a3b1f5..8ecdf2ed9d 100644 --- a/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc +++ b/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc @@ -37,7 +37,7 @@ void FusionRepeatedFCReluOp::InferShape( "Output(Out) of FusionRepeatedFCReluOp should not be null."); auto i_dims = ctx->GetInputDim("X"); - PADDLE_ENFORCE_EQ(i_dims.size(), 2UL, "Input shape size should be 2"); + PADDLE_ENFORCE_EQ(i_dims.size(), 2, "Input shape size should be 2"); auto w_dims = ctx->GetInputsDim("W"); auto b_dims = ctx->GetInputsDim("Bias"); @@ -49,7 +49,7 @@ void FusionRepeatedFCReluOp::InferShape( "inpute width should be equal with weight height"); for (size_t i = 1; i < sz; ++i) { - PADDLE_ENFORCE_EQ(w_dims[i].size(), 2UL, + PADDLE_ENFORCE_EQ(w_dims[i].size(), 2, "Every weight shape size should be 2."); PADDLE_ENFORCE_EQ(framework::product(b_dims[i]), w_dims[i][1], "The length of Bias must be equal with w_dims[1]."); diff --git a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc index aaef46de0d..d091da5aa8 100644 --- a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc @@ -39,7 +39,7 @@ void FusionSeqExpandConcatFCOp::InferShape( auto ins_dims = ctx->GetInputsDim("X"); auto w_dims = ctx->GetInputDim("FCWeight"); // (M0+M1+M2+..) x D - PADDLE_ENFORCE_EQ(w_dims.size(), 2UL, "Input(FCWeight)'s rank must be 2."); + PADDLE_ENFORCE_EQ(w_dims.size(), 2, "Input(FCWeight)'s rank must be 2."); const int D = w_dims[1]; int sum = ins_dims[0][1]; for (size_t i = 1; i < ins_dims.size(); ++i) { diff --git a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc index b181140db7..d48bdafe0a 100644 --- a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc @@ -39,7 +39,7 @@ void FusionSeqPoolConcatOp::InferShape( // The output height should be confirmed in Compute, // since input lod is not accessible here. 
- PADDLE_ENFORCE_EQ(ins_dims[0].size(), 2UL, + PADDLE_ENFORCE_EQ(ins_dims[0].size(), 2, "The dims size of first input should be 2."); ctx->SetOutputDim("Out", {-1, ins_dims[0][axis] * static_cast(n)}); } diff --git a/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc b/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc index 8c8b079633..8493f4468f 100644 --- a/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc +++ b/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc @@ -42,7 +42,7 @@ void FusionSquaredMatSubOp::InferShape( auto y_dims = ctx->GetInputDim("Y"); PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(), "Input tensors dims size should be equal."); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input tensors should be a Matrix."); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input tensors should be a Matrix."); PADDLE_ENFORCE_EQ(x_dims[1], y_dims[0], "Inputs Matrix should be multiply."); ctx->SetOutputDim("SquaredX", x_dims); diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index f83fe355b8..b9db6daf08 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -44,11 +44,11 @@ class LayerNormOp : public framework::OperatorWithKernel { int left = static_cast(matrix_dim[0]); int right = static_cast(matrix_dim[1]); if (ctx->HasInput("Scale")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], right); } if (ctx->HasInput("Bias")) { - PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1); PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], right); } diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index 1da14631e3..e17b6cb598 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -144,12 +144,12 @@ class LinearChainCRFOp : public framework::OperatorWithKernel { "Output(LogLikelihood) should be not null."); auto emission_dims = ctx->GetInputDim("Emission"); - PADDLE_ENFORCE_EQ(emission_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(emission_dims.size(), 2, "The Input(Emission) should be a 2-D tensor."); PADDLE_ENFORCE(emission_dims[0], "An empty mini-batch is not allowed."); auto transition_dims = ctx->GetInputDim("Transition"); - PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(transition_dims.size(), 2, "The Input(Transition) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( transition_dims[0] - 2, transition_dims[1], @@ -202,13 +202,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel { "Input(LogLikelihood@GRAD) shoudl be not null."); auto emission_exps_dims = ctx->GetInputDim("EmissionExps"); - PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(emission_exps_dims.size(), 2, "The Input(EmissionExps) should be a 2-D tensor."); PADDLE_ENFORCE(emission_exps_dims[0], "An empty mini-batch is not allowed."); auto transition_exps_dims = ctx->GetInputDim("TransitionExps"); - PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2UL, + PADDLE_ENFORCE_EQ(transition_exps_dims.size(), 2, "The Input(TransitionExps) should be a 2-D tensor."); PADDLE_ENFORCE_EQ( transition_exps_dims[0] - 2, transition_exps_dims[1], diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc index 1eebadc2c9..0932211cad 
100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc @@ -31,10 +31,10 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel { const auto x_dims = ctx->GetInputDim("X"); PADDLE_ENFORCE_EQ( - x_dims.size(), 2UL, + x_dims.size(), 2, "Input(X) of SequenceEnumerate operator's rank should be 2."); PADDLE_ENFORCE_EQ( - x_dims[1], 1UL, + x_dims[1], 1, "Input(X) of SequenceEnumerate operator's 2nd dimension should be 1."); const auto win_size = ctx->Attrs().Get("win_size"); diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc index 27e0201bd7..f6c4241530 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc @@ -48,10 +48,10 @@ class SequenceExpandOp : public framework::OperatorWithKernel { auto& x_lod = x_var->Get().lod(); auto& y_lod = y_var->Get().lod(); - PADDLE_ENFORCE_LE(x_lod.size(), 1, + PADDLE_ENFORCE_LE(x_lod.size(), 1UL, "Level number of Input(X)'s lod should not be " "greater than 1."); - PADDLE_ENFORCE_GT(y_lod.size(), 0, + PADDLE_ENFORCE_GT(y_lod.size(), 0UL, "Level number of Input(Y)'s lod should be " "greater than 0."); PADDLE_ENFORCE( @@ -69,7 +69,8 @@ class SequenceExpandOp : public framework::OperatorWithKernel { "size of Input(X)'s first level lod should be equal to " "size of Input(Y)'s referred level lod."); } else { - PADDLE_ENFORCE_EQ(x_dims[0], y_lod[ref_level].size() - 1, + PADDLE_ENFORCE_EQ(x_dims[0], + static_cast(y_lod[ref_level].size()) - 1, "When Input(X)'s lod is null, the dims[0] of " "Input(X) should match the " "size of Input(Y)'s referred level lod."); diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 1091badae5..f235932225 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -118,59 +118,58 @@ TEST(ENFORCE_GT, OK) { PADDLE_ENFORCE_GT(2, 1); } TEST(ENFORCE_GT, FAIL) { bool caught_exception = false; try { - PADDLE_ENFORCE_GT(1, 2UL); + PADDLE_ENFORCE_GT(1, 2); } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; - EXPECT_TRUE(HasPrefix( - StringPiece(error.what()), - "Enforce failed. Expected 1 > 2UL, but received 1:1 <= 2UL:2.")); + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), + "Enforce failed. Expected 1 > 2, but received 1:1 <= 2:2.")); } EXPECT_TRUE(caught_exception); } TEST(ENFORCE_GE, OK) { - PADDLE_ENFORCE_GE(2, 2UL); - PADDLE_ENFORCE_GE(3, 2UL); + PADDLE_ENFORCE_GE(2, 2); PADDLE_ENFORCE_GE(3, 2); - PADDLE_ENFORCE_GE(3.21, 2UL); + PADDLE_ENFORCE_GE(3.21, 2.0); } TEST(ENFORCE_GE, FAIL) { bool caught_exception = false; try { - PADDLE_ENFORCE_GE(1, 2UL); + PADDLE_ENFORCE_GE(1, 2); } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; - EXPECT_TRUE(HasPrefix( - StringPiece(error.what()), - "Enforce failed. Expected 1 >= 2UL, but received 1:1 < 2UL:2.")); + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), + "Enforce failed. 
Expected 1 >= 2, but received 1:1 < 2:2.")); } EXPECT_TRUE(caught_exception); } TEST(ENFORCE_LE, OK) { PADDLE_ENFORCE_LE(1, 1); - PADDLE_ENFORCE_LE(1, 1UL); - PADDLE_ENFORCE_LE(2, 3UL); - PADDLE_ENFORCE_LE(2UL, 3); - PADDLE_ENFORCE_LE(2UL, 3.2); + PADDLE_ENFORCE_LE(1UL, 1UL); + PADDLE_ENFORCE_LE(2, 3); + PADDLE_ENFORCE_LE(2UL, 3UL); + PADDLE_ENFORCE_LE(2.0, 3.2); } TEST(ENFORCE_LE, FAIL) { bool caught_exception = false; try { - PADDLE_ENFORCE_GT(1, 2UL); + PADDLE_ENFORCE_GT(1, 2); } catch (paddle::platform::EnforceNotMet error) { caught_exception = true; - EXPECT_TRUE(HasPrefix( - StringPiece(error.what()), - "Enforce failed. Expected 1 > 2UL, but received 1:1 <= 2UL:2.")); + EXPECT_TRUE( + HasPrefix(StringPiece(error.what()), + "Enforce failed. Expected 1 > 2, but received 1:1 <= 2:2.")); } EXPECT_TRUE(caught_exception); } TEST(ENFORCE_LT, OK) { PADDLE_ENFORCE_LT(3, 10); - PADDLE_ENFORCE_LT(2, 3UL); - PADDLE_ENFORCE_LT(2UL, 3); + PADDLE_ENFORCE_LT(2UL, 3UL); + PADDLE_ENFORCE_LT(2, 3); } TEST(ENFORCE_LT, FAIL) { bool caught_exception = false; From 6311ae5df92011a6af9f77e12fc8b7875d4f8315 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 21:16:21 +0800 Subject: [PATCH 077/117] remove legacy WITH_DOUBLE option --- CMakeLists.txt | 1 - cmake/configure.cmake | 4 ---- paddle/scripts/submit_local.sh.in | 1 - 3 files changed, 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 61f5e63098..cfaafc8ed7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,7 +54,6 @@ option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) -option(WITH_DOUBLE "Compile PaddlePaddle with double precision" OFF) option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF) option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF) option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index b0f54bf49a..fdc9e38f4b 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -20,10 +20,6 @@ if(WITH_DSO) add_definitions(-DPADDLE_USE_DSO) endif(WITH_DSO) -if(WITH_DOUBLE) - add_definitions(-DPADDLE_TYPE_DOUBLE) -endif(WITH_DOUBLE) - if(WITH_ARM_FP16) add_definitions(-DPADDLE_ARM_FP16) add_definitions("-march=armv8.2-a+fp16+simd") diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 1f421f248f..3181e60fbe 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -6,7 +6,6 @@ function version(){ echo " with_gpu: @WITH_GPU@" echo " with_mkl: @WITH_MKL@" echo " with_mkldnn: @WITH_MKLDNN@" - echo " with_double: @WITH_DOUBLE@" echo " with_python: @WITH_PYTHON@" echo " with_rdma: @WITH_RDMA@" echo " with_timer: @WITH_TIMER@" From 688023ede09796a193e901b9ff4bcde766160c5b Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 21:24:15 +0800 Subject: [PATCH 078/117] remove legacy WITH_RDMA option --- CMakeLists.txt | 2 - cmake/hip.cmake | 6 --- cmake/rdma.cmake | 82 ------------------------------- paddle/scripts/submit_local.sh.in | 1 - 4 files changed, 91 deletions(-) delete mode 100644 cmake/rdma.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index cfaafc8ed7..9ce82e51d3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,7 +54,6 @@ option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." 
OFF) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) -option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF) option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF) option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF) option(WITH_JEMALLOC "Compile PaddlePaddle with jemalloc" OFF) @@ -224,7 +223,6 @@ include(generic) # simplify cmake module include(package) # set paddle packages include(ccache) # set ccache for compilation include(util) # set unittest and link libs -include(rdma) # set rdma libraries include(version) # set PADDLE_VERSION include(coveralls) # set code coverage include(inference_lib) # add paddle fluid inference libraries diff --git a/cmake/hip.cmake b/cmake/hip.cmake index 4276bc5b08..c25397b980 100644 --- a/cmake/hip.cmake +++ b/cmake/hip.cmake @@ -41,12 +41,6 @@ endif(WITH_MKLDNN) set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DANY_IMPL_ANY_CAST_MOVEABLE") -if(NOT WITH_RDMA) - set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_DISABLE_RDMA") -endif(NOT WITH_RDMA) - - - if(CMAKE_BUILD_TYPE STREQUAL "Debug") list(APPEND HIP_HCC_FLAGS ${CMAKE_CXX_FLAGS_DEBUG}) elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") diff --git a/cmake/rdma.cmake b/cmake/rdma.cmake deleted file mode 100644 index b698f3bdc3..0000000000 --- a/cmake/rdma.cmake +++ /dev/null @@ -1,82 +0,0 @@ -# user should download rdma first from subversion repository - -# execute following instruction to download svn mannally -# svn co https://svn.baidu.com/sys/ip/trunk/rdma/sockrdmav1 rdma/ -# svn co https://svn.baidu.com/sys/ip/trunk/rdma/thirdparty rdma/ -# we use static output in svn repositories to avoid implict bugs from not standard runtime env. - -if(WITH_RDMA) - set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library") - - function(generate_rdma_links) - #redirect to current DIR to isolate the pollution from system runtime environment - #it can benifits unified control for different gcc environment. - #e.g, by default gcc48 did not refer /usr/lib64 which could contain low version - #runtime libraries that will crash process while loading it. That redirect trick - #can fix it. 
- execute_process( - COMMAND mkdir -p librdma - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 - COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 - COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so - COMMAND ln -s -f /lib64/libnl.so.1.1.4 librdma/libnl.so.1 - COMMAND ln -s -f /lib64/libnl.so.1.1.4 librdma/libnl.so - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - ) - endfunction(generate_rdma_links) - - #check and set headers - find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) - find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) - find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) - find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - - #check and set libs - find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) - find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) - find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) - find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) - find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) - find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) - find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) - - if( - RDMA_INC_SXISOCK AND - RDMA_INC_XIO AND - RDMA_INC_EVENT AND - RDMA_INC_NUMA AND - RDMA_LIB_SXISOCK AND - RDMA_LIB_XIO AND - RDMA_LIB_EVENT AND - RDMA_LIB_EVENT_CORE AND - RDMA_LIB_EVENT_EXTRA AND - RDMA_LIB_EVENT_PTHREADS AND - RDMA_LIB_NUMA - ) - - set(RDMA_INC_DIR - ${RDMA_INC_SXISOCK} - ${RDMA_INC_XIO} - ${RDMA_INC_EVENT} - ${RDMA_INC_NUMA}) - set(RDMA_LIBS - ${RDMA_LIB_SXISOCK} - ${RDMA_LIB_XIO} - ${RDMA_LIB_EVENT} - ${RDMA_LIB_EVENT_CORE} - ${RDMA_LIB_EVENT_EXTRA} - ${RDMA_LIB_EVENT_PTHREADS} - ${RDMA_LIB_NUMA} - ) - set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") - include_directories("${RDMA_INC_DIR}") - else() - #if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable - message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") - endif() -else(WITH_RDMA) - set(RDMA_LIBS "") - set(RDMA_LD_FLAGS "") - add_definitions(-DPADDLE_DISABLE_RDMA) -endif(WITH_RDMA) diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 3181e60fbe..9d07bba81e 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -7,7 +7,6 @@ function version(){ echo " with_mkl: @WITH_MKL@" echo " with_mkldnn: @WITH_MKLDNN@" echo " with_python: @WITH_PYTHON@" - echo " with_rdma: @WITH_RDMA@" echo " with_timer: @WITH_TIMER@" } From ff2a8386a0230fe646e0d4c9ec6a16e361818521 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 21:28:17 +0800 Subject: [PATCH 079/117] remove legacy USE_EIGEN_FOR_BLAS option --- CMakeLists.txt | 1 - cmake/configure.cmake | 4 ---- cmake/external/openblas.cmake | 5 ----- 3 files changed, 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9ce82e51d3..37cce8746a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -66,7 +66,6 @@ option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go 
dependencies " ON) option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(WITH_PSLIB "Compile with pslib support" OFF) -option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF) option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index fdc9e38f4b..cc5ee3f654 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -33,10 +33,6 @@ if(NOT WITH_TIMER) add_definitions(-DPADDLE_DISABLE_TIMER) endif(NOT WITH_TIMER) -if(USE_EIGEN_FOR_BLAS) - add_definitions(-DPADDLE_USE_EIGEN_FOR_BLAS) -endif(USE_EIGEN_FOR_BLAS) - if(EIGEN_USE_THREADS) add_definitions(-DEIGEN_USE_THREADS) endif(EIGEN_USE_THREADS) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index b347a59292..f4c2a406f0 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -11,11 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -IF(USE_EIGEN_FOR_BLAS) - return() -ENDIF(USE_EIGEN_FOR_BLAS) - INCLUDE(cblas) IF(NOT ${CBLAS_FOUND}) From f522b4417f14df6f53ad168d8ad770c5af02e5c4 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 21:35:19 +0800 Subject: [PATCH 080/117] remove legacy WITH_TIMER, WITH_DOC, ON_TRAVIS options --- CMakeLists.txt | 3 --- cmake/configure.cmake | 4 ---- paddle/contrib/float16/run_float16_demo.sh | 1 - paddle/scripts/README.md | 1 - paddle/scripts/submit_local.sh.in | 1 - 5 files changed, 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 37cce8746a..cefee607ad 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,13 +54,10 @@ option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF) option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) -option(WITH_TIMER "Compile PaddlePaddle with stats timer" OFF) option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF) option(WITH_JEMALLOC "Compile PaddlePaddle with jemalloc" OFF) -option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) -option(ON_TRAVIS "Exclude special unit test on Travis CI" OFF) option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go dependencies " ON) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index cc5ee3f654..498ff019c5 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -29,10 +29,6 @@ if(WITH_TESTING) add_definitions(-DPADDLE_WITH_TESTING) endif(WITH_TESTING) -if(NOT WITH_TIMER) - add_definitions(-DPADDLE_DISABLE_TIMER) -endif(NOT WITH_TIMER) - if(EIGEN_USE_THREADS) add_definitions(-DEIGEN_USE_THREADS) endif(EIGEN_USE_THREADS) diff --git a/paddle/contrib/float16/run_float16_demo.sh b/paddle/contrib/float16/run_float16_demo.sh index 031225a85d..9701588d8f 100755 --- a/paddle/contrib/float16/run_float16_demo.sh +++ b/paddle/contrib/float16/run_float16_demo.sh @@ -14,7 +14,6 @@ cmake .. 
-DWITH_AVX=OFF \ -DWITH_MKL=OFF \ -DWITH_GPU=ON \ -DWITH_TESTING=ON \ - -DWITH_TIMER=ON \ -DWITH_PROFILER=ON \ -DWITH_FLUID_ONLY=ON make -j `nproc` diff --git a/paddle/scripts/README.md b/paddle/scripts/README.md index 6c608fce3c..0d6921bdf8 100644 --- a/paddle/scripts/README.md +++ b/paddle/scripts/README.md @@ -71,7 +71,6 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | `WITH_STYLE_CHECK` | ON | Check the code style when building. | | `PYTHON_ABI` | "" | Build for different python ABI support, can be cp27-cp27m or cp27-cp27mu | | `RUN_TEST` | OFF | Run unit test immediently after the build. | -| `WITH_DOC` | OFF | Build docs after build binaries. | | `WOBOQ` | OFF | Generate WOBOQ code viewer under `build/woboq_out` | ## Docker Images diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 9d07bba81e..be8bc29414 100755 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -7,7 +7,6 @@ function version(){ echo " with_mkl: @WITH_MKL@" echo " with_mkldnn: @WITH_MKLDNN@" echo " with_python: @WITH_PYTHON@" - echo " with_timer: @WITH_TIMER@" } function ver2num() { From 978599154fc6e6c8563d45c116f8efa83b7edeb4 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 21:48:21 +0800 Subject: [PATCH 081/117] remove legacy WITH_GOLANG, GLIDE_INSTALL options --- CMakeLists.txt | 2 - cmake/configure.cmake | 53 --------------------------- cmake/hip.cmake | 4 -- paddle/scripts/README.md | 1 - paddle/scripts/paddle_build.sh | 6 --- paddle/scripts/paddle_docker_build.sh | 1 - 6 files changed, 67 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cefee607ad..ac7be9a7f4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,8 +59,6 @@ option(WITH_JEMALLOC "Compile PaddlePaddle with jemalloc" OFF) option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) -option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) -option(GLIDE_INSTALL "Download and install go dependencies " ON) option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(WITH_PSLIB "Compile with pslib support" OFF) option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 498ff019c5..420f50bd7d 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -66,10 +66,6 @@ if(WIN32) endif(NOT MSVC) endif(WIN32) -if(NOT WITH_GOLANG) - add_definitions(-DPADDLE_WITHOUT_GOLANG) -endif(NOT WITH_GOLANG) - if(WITH_PSLIB) add_definitions(-DPADDLE_WITH_PSLIB) endif() @@ -159,55 +155,6 @@ if(WITH_DISTRIBUTE) add_definitions(-DPADDLE_WITH_DISTRIBUTE) endif() -if(WITH_GOLANG) - # we need to symlink Paddle directory into GOPATH. If we - # don't do it and we have code that depends on Paddle, go - # get ./... will download a new Paddle repo from Github, - # without the changes in our current Paddle repo that we - # want to build. 
- set(GOPATH "${CMAKE_CURRENT_BINARY_DIR}/go") - file(MAKE_DIRECTORY ${GOPATH}) - set(PADDLE_IN_GOPATH "${GOPATH}/src/github.com/PaddlePaddle/Paddle") - file(MAKE_DIRECTORY "${PADDLE_IN_GOPATH}") - set(PADDLE_GO_PATH "${CMAKE_SOURCE_DIR}/go") - - add_custom_target(go_path) - add_custom_command(TARGET go_path - # Symlink Paddle directory into GOPATH - COMMAND mkdir -p ${PADDLE_IN_GOPATH} - COMMAND rm -rf ${PADDLE_IN_GOPATH} - COMMAND ln -sf ${CMAKE_SOURCE_DIR} ${PADDLE_IN_GOPATH} - # Automatically get all dependencies specified in the source code - # We can't run `go get -d ./...` for every target, because - # multiple `go get` can not run concurrently, but make need to be - # able to run with multiple jobs. - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - ) - - if (GLIDE_INSTALL) - if(EXISTS $ENV{GOPATH}/bin/glide) - set(GLIDE "$ENV{GOPATH}/bin/glide") - else() - message(FATAL_ERROR "no glide executeble found: $ENV{GOPATH}/bin/glide") - endif() - - # this command will only run when the file it depends is missing - # or has changed, or the output is missing. - add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/glide - COMMAND env GOPATH=${GOPATH} ${GLIDE} install - COMMAND touch ${CMAKE_BINARY_DIR}/glide - DEPENDS ${PADDLE_SOURCE_DIR}/go/glide.lock - WORKING_DIRECTORY "${PADDLE_IN_GOPATH}/go" - ) - - # depends on the custom command which outputs - # ${CMAKE_BINARY_DIR}/glide, the custom command does not need to - # run every time this target is built. - add_custom_target(go_vendor DEPENDS ${CMAKE_BINARY_DIR}/glide go_path) - endif() - -endif(WITH_GOLANG) - if(WITH_GRPC) add_definitions(-DPADDLE_WITH_GRPC) endif(WITH_GRPC) diff --git a/cmake/hip.cmake b/cmake/hip.cmake index c25397b980..4dc4952346 100644 --- a/cmake/hip.cmake +++ b/cmake/hip.cmake @@ -31,10 +31,6 @@ if(WITH_GRPC) set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_GRPC") endif(WITH_GRPC) -if(NOT WITH_GOLANG) - set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITHOUT_GOLANG") -endif(NOT WITH_GOLANG) - if(WITH_MKLDNN) set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_MKLDNN") endif(WITH_MKLDNN) diff --git a/paddle/scripts/README.md b/paddle/scripts/README.md index 0d6921bdf8..1db262f06d 100644 --- a/paddle/scripts/README.md +++ b/paddle/scripts/README.md @@ -66,7 +66,6 @@ Users can specify the following Docker build arguments with either "ON" or "OFF" | `WITH_AVX` | OFF | Set to "ON" to enable AVX support. | | `WITH_TESTING` | OFF | Build unit tests binaries. | | `WITH_MKL` | ON | Build with [Intel® MKL](https://software.intel.com/en-us/mkl) and [Intel® MKL-DNN](https://github.com/01org/mkl-dnn) support. | -| `WITH_GOLANG` | OFF | Build fault-tolerant parameter server written in go. | | `WITH_PYTHON` | ON | Build with python support. Turn this off if build is only for capi. | | `WITH_STYLE_CHECK` | ON | Check the code style when building. 
| | `PYTHON_ABI` | "" | Build for different python ABI support, can be cp27-cp27m or cp27-cp27mu | diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index e7078499ca..2bf15dcd73 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -722,12 +722,6 @@ EOF EOF fi - if [[ ${WITH_GOLANG:-OFF} == "ON" ]]; then - cat >> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile < Date: Tue, 19 Feb 2019 21:58:28 +0800 Subject: [PATCH 082/117] remove legacy EIGEN_USE_THREADS, WITH_ARM_FP16 options --- CMakeLists.txt | 2 -- cmake/configure.cmake | 9 --------- 2 files changed, 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ac7be9a7f4..ae6788231e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -61,8 +61,6 @@ option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(WITH_PSLIB "Compile with pslib support" OFF) -option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF) -option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF) option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF) option(WITH_ANAKIN "Compile with Anakin library" OFF) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index 420f50bd7d..93d74bb0a8 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -20,19 +20,10 @@ if(WITH_DSO) add_definitions(-DPADDLE_USE_DSO) endif(WITH_DSO) -if(WITH_ARM_FP16) - add_definitions(-DPADDLE_ARM_FP16) - add_definitions("-march=armv8.2-a+fp16+simd") -endif(WITH_ARM_FP16) - if(WITH_TESTING) add_definitions(-DPADDLE_WITH_TESTING) endif(WITH_TESTING) -if(EIGEN_USE_THREADS) - add_definitions(-DEIGEN_USE_THREADS) -endif(EIGEN_USE_THREADS) - if(NOT WITH_PROFILER) add_definitions(-DPADDLE_DISABLE_PROFILER) endif(NOT WITH_PROFILER) From 6b83845c41ad3e6c4efcf408a1e6d132c6da24ac Mon Sep 17 00:00:00 2001 From: xuezhong Date: Tue, 19 Feb 2019 13:59:02 +0000 Subject: [PATCH 083/117] update for backward compatibility test=develop --- paddle/fluid/API.spec | 2 +- python/paddle/fluid/layers/nn.py | 36 ++++++++++++++++---------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 1c2f562067..6fca3f3bfc 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -71,7 +71,7 @@ paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'v paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None)) paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')) paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None)) -paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'cell_clip', 'proj_clip', 'is_reverse', 
'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, None, None, False, 'sigmoid', 'tanh', 'tanh', 'identity', 'float32', None)) +paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name', 'h_0', 'c_0', 'cell_clip', 'proj_clip'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None, None, None, None, None)) paddle.fluid.layers.dynamic_gru ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False)) paddle.fluid.layers.gru_unit ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False)) paddle.fluid.layers.linear_chain_crf ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,)) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 8ca2ca45ee..de2cb46cff 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -659,20 +659,20 @@ def lstm(input, def dynamic_lstmp(input, size, proj_size, - h_0=None, - c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, - cell_clip=None, - proj_clip=None, is_reverse=False, gate_activation='sigmoid', cell_activation='tanh', candidate_activation='tanh', - proj_activation='identity', + proj_activation='tanh', dtype='float32', - name=None): + name=None, + h_0=None, + c_0=None, + cell_clip=None, + proj_clip=None): """ **Dynamic LSTMP Layer** @@ -740,12 +740,6 @@ def dynamic_lstmp(input, mini-batch, D is the hidden size. size(int): 4 * hidden size. proj_size(int): The size of projection output. - h_0(Variable): The initial hidden state is an optional input, default is zero. - This is a tensor with shape (N x D), where N is the - batch size and D is the projection size. - c_0(Variable): The initial cell state is an optional input, default is zero. - This is a tensor with shape (N x D), where N is the - batch size. `h_0` and `c_0` can be NULL but only at the same time. param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weight and projection weight. @@ -780,11 +774,6 @@ def dynamic_lstmp(input, the bias is initialized zero. Default: None. use_peepholes(bool): Whether to enable diagonal/peephole connections, default `True`. - cell_clip(float): If provided the cell state is clipped - by this value prior to the cell output activation. - proj_clip(float): If `num_proj > 0` and `proj_clip` is - provided, then the projected values are clipped elementwise to within - `[-proj_clip, proj_clip]`. is_reverse(bool): Whether to compute reversed LSTM, default `False`. gate_activation(str): The activation for input gate, forget gate and output gate. Choices = ["sigmoid", "tanh", "relu", @@ -796,10 +785,21 @@ def dynamic_lstmp(input, default "tanh". proj_activation(str): The activation for projection output. Choices = ["sigmoid", "tanh", "relu", "identity"], - default "identity". + default "tanh". dtype(str): Data type. Choices = ["float32", "float64"], default "float32". 
name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. + h_0(Variable): The initial hidden state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size and D is the projection size. + c_0(Variable): The initial cell state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size. `h_0` and `c_0` can be NULL but only at the same time. + cell_clip(float): If provided the cell state is clipped + by this value prior to the cell output activation. + proj_clip(float): If `num_proj > 0` and `proj_clip` is + provided, then the projected values are clipped elementwise to within + `[-proj_clip, proj_clip]`. Returns: tuple: A tuple of two output variable: the projection of hidden state, \ From b9d1bf2364294a9211a90257bca2bf37bede64a8 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Tue, 19 Feb 2019 22:06:51 +0800 Subject: [PATCH 084/117] remove leacy WITH_FLUID_ONLY option --- CMakeLists.txt | 3 --- paddle/contrib/float16/run_float16_demo.sh | 1 - paddle/fluid/train/demo/README.md | 1 - paddle/scripts/paddle_build.sh | 19 +++---------------- paddle/scripts/paddle_docker_build.sh | 1 - 5 files changed, 3 insertions(+), 22 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ae6788231e..cad0f71702 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -58,7 +58,6 @@ option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" option(WITH_JEMALLOC "Compile PaddlePaddle with jemalloc" OFF) option(WITH_COVERAGE "Compile PaddlePaddle with code coverage" OFF) option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF) -option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(WITH_PSLIB "Compile with pslib support" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF) @@ -95,8 +94,6 @@ endif() if (WIN32) set(WITH_DISTRIBUTE OFF CACHE STRING "Disable DISTRIBUTE when compiling for Windows" FORCE) - set(WITH_FLUID_ONLY ON CACHE STRING - "Enable FLUID_ONLY when compiling for Windows" FORCE) endif() set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING diff --git a/paddle/contrib/float16/run_float16_demo.sh b/paddle/contrib/float16/run_float16_demo.sh index 9701588d8f..34cb7a12db 100755 --- a/paddle/contrib/float16/run_float16_demo.sh +++ b/paddle/contrib/float16/run_float16_demo.sh @@ -15,7 +15,6 @@ cmake .. -DWITH_AVX=OFF \ -DWITH_GPU=ON \ -DWITH_TESTING=ON \ -DWITH_PROFILER=ON \ - -DWITH_FLUID_ONLY=ON make -j `nproc` pip install -U "$WHEEL_PATH/$(ls $WHEEL_PATH)" diff --git a/paddle/fluid/train/demo/README.md b/paddle/fluid/train/demo/README.md index 191da20669..bd53ab4b0c 100644 --- a/paddle/fluid/train/demo/README.md +++ b/paddle/fluid/train/demo/README.md @@ -9,7 +9,6 @@ PADDLE_LIB=/paddle/lib/dir cmake .. 
-DFLUID_INSTALL_DIR=$PADDLE_LIB \ -DCMAKE_BUILD_TYPE=Release \ - -DWITH_FLUID_ONLY=ON \ -DWITH_GPU=OFF \ -DWITH_STYLE_CHECK=OFF \ -DWITH_MKL=OFF \ diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 2bf15dcd73..26b26c9b1f 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -87,7 +87,6 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.5/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.5/include/python3.5m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.5/lib/libpython3.5m.dylib" - WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} pip3.5 uninstall -y protobuf pip3.5 install --user -r ${PADDLE_ROOT}/python/requirements.txt else @@ -101,7 +100,6 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.6/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6m.dylib" - WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} pip3.6 uninstall -y protobuf pip3.6 install --user -r ${PADDLE_ROOT}/python/requirements.txt else @@ -115,7 +113,6 @@ function cmake_gen() { PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3 -DPYTHON_INCLUDE_DIR:PATH=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m/ -DPYTHON_LIBRARY:FILEPATH=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib" - WITH_FLUID_ONLY=${WITH_FLUID_ONLY:-ON} pip3.7 uninstall -y protobuf pip3.7 install --user -r ${PADDLE_ROOT}/python/requirements.txt else @@ -202,7 +199,6 @@ function cmake_gen() { -DWITH_TESTING=${WITH_TESTING:-ON} -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON - -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DWITH_CONTRIB=${WITH_CONTRIB:-ON} -DWITH_INFERENCE_API_TEST=${WITH_INFERENCE_API_TEST:-ON} @@ -235,7 +231,6 @@ EOF -DCUDNN_ROOT=/usr/ \ -DWITH_TESTING=${WITH_TESTING:-ON} \ -DCMAKE_MODULE_PATH=/opt/rocm/hip/cmake \ - -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \ -DWITH_INFERENCE_API_TEST=${WITH_INFERENCE_API_TEST:-ON} \ @@ -398,9 +393,7 @@ EOF pip3.7 install --user ${INSTALL_PREFIX:-/paddle/build}/opt/paddle/share/wheels/*.whl fi - if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]] ; then - paddle version - fi + paddle version if [ "$1" == "cp27-cp27m" ]; then pip uninstall -y paddlepaddle @@ -555,7 +548,6 @@ EOF -DCMAKE_BUILD_TYPE=Release \ -DWITH_GPU=OFF \ -DWITH_MKL=OFF \ - -DWITH_FLUID_ONLY=ON local LIB_TYPE=$1 case $LIB_TYPE in @@ -631,13 +623,8 @@ EOF NCCL_DEPS="true" fi - if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]]; then - PADDLE_VERSION="paddle version" - CMD='"paddle", "version"' - else - PADDLE_VERSION="true" - CMD='"true"' - fi + PADDLE_VERSION="paddle version" + CMD='"paddle", "version"' if [ "$1" == "cp35-cp35m" ]; then cat >> ${PADDLE_ROOT}/build/Dockerfile < Date: Tue, 19 Feb 2019 22:20:17 +0800 Subject: [PATCH 085/117] remove legacy EXTERNAL_LIBS variable test=develop --- CMakeLists.txt | 27 --------------------------- cmake/cuda.cmake | 3 --- cmake/hip.cmake | 2 -- cmake/tensorrt.cmake | 1 - 4 files changed, 33 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cad0f71702..79054295fd 100644 --- a/CMakeLists.txt +++ 
b/CMakeLists.txt @@ -219,38 +219,11 @@ include(inference_lib) # add paddle fluid inference libraries include_directories("${PADDLE_SOURCE_DIR}") -set(EXTERNAL_LIBS - gflags - glog - ${CBLAS_LIBRARIES} - protobuf - zlib - ${PYTHON_LIBRARIES} -) - -if(WITH_PSLIB) - list(APPEND EXTERNAL_LIBS pslib) - list(APPEND EXTERNAL_LIBS pslib_brpc) - list(APPEND EXTERNAL_LIBS libmct) -endif(WITH_PSLIB) - if(WITH_AMD_GPU) find_package(HIP) include(hip) endif(WITH_AMD_GPU) -if(WITH_MKLML) - list(APPEND EXTERNAL_LIBS ${MKLML_IOMP_LIB}) -endif() - -if(WITH_LIBXSMM) - list(APPEND EXTERNAL_LIBS ${LIBXSMM_LIBS}) -endif() - -if(WITH_MKLDNN) - list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB}) -endif() - set(PADDLE_PYTHON_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/python/build") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 -g -DNDEBUG") diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index ef4192ecc9..735846db1d 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -168,10 +168,7 @@ elseif (${CUDA_VERSION} LESS 11.0) # CUDA 10.x endif() include_directories(${CUDA_INCLUDE_DIRS}) -list(APPEND EXTERNAL_LIBS ${CUDA_LIBRARIES} ${CUDA_rt_LIBRARY}) if(NOT WITH_DSO) - # TODO(panyx0718): CUPTI only allows DSO? - list(APPEND EXTERNAL_LIBS ${CUDNN_LIBRARY} ${CUPTI_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${NCCL_LIBRARY}) if(WIN32) set_property(GLOBAL PROPERTY CUDA_MODULES ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY}) endif(WIN32) diff --git a/cmake/hip.cmake b/cmake/hip.cmake index 4dc4952346..c3a748db50 100644 --- a/cmake/hip.cmake +++ b/cmake/hip.cmake @@ -11,8 +11,6 @@ include_directories("/opt/rocm/rocrand/include") include_directories("/opt/rocm/rccl/include") include_directories("/opt/rocm/thrust") -list(APPEND EXTERNAL_LIBS "-L/opt/rocm/lib/ -lhip_hcc") - set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -fPIC -DPADDLE_WITH_HIP -std=c++11" ) if(WITH_DSO) diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake index 3dc7171551..891ff22263 100644 --- a/cmake/tensorrt.cmake +++ b/cmake/tensorrt.cmake @@ -33,6 +33,5 @@ if(TENSORRT_FOUND) message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. " "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. 
") include_directories(${TENSORRT_INCLUDE_DIR}) - list(APPEND EXTERNAL_LIBS ${TENSORRT_LIBRARY}) add_definitions(-DPADDLE_WITH_TENSORRT) endif() From c797a1f050a8f1a7c75de58aba5d387c803d678f Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Wed, 20 Feb 2019 11:27:01 +0800 Subject: [PATCH 086/117] remove legacy any.cmake --- CMakeLists.txt | 1 - cmake/external/any.cmake | 31 ---------------------------- paddle/fluid/platform/CMakeLists.txt | 2 +- 3 files changed, 1 insertion(+), 33 deletions(-) delete mode 100644 cmake/external/any.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 61f5e63098..171934b739 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -148,7 +148,6 @@ include(external/openblas) # download, build, install openblas include(external/mkldnn) # download, build, install mkldnn include(external/ngraph) # download, build, install nGraph include(external/boost) # download boost -include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 include(external/cares) diff --git a/cmake/external/any.cmake b/cmake/external/any.cmake deleted file mode 100644 index 85cce80b70..0000000000 --- a/cmake/external/any.cmake +++ /dev/null @@ -1,31 +0,0 @@ -INCLUDE(ExternalProject) - -SET(ANY_SOURCE_DIR ${THIRD_PARTY_PATH}/any) - -INCLUDE_DIRECTORIES(${ANY_SOURCE_DIR}/src/extern_lib_any) - -ExternalProject_Add( - extern_lib_any - ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/PaddlePaddle/any.git" - GIT_TAG "15595d8324be9e8a9a80d9ae442fdd12bd66df5d" - PREFIX ${ANY_SOURCE_DIR} - UPDATE_COMMAND "" - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" - TEST_COMMAND "" -) - -if (${CMAKE_VERSION} VERSION_LESS "3.3.0") - set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/lib_any_dummy.c) - file(WRITE ${dummyfile} "const char * dummy_any = \"${dummyfile}\";") - add_library(lib_any STATIC ${dummyfile}) -else() - add_library(lib_any INTERFACE) -endif() - -add_dependencies(lib_any extern_lib_any) - -add_definitions(-DANY_IMPL_ANY_CAST_MOVEABLE) -LIST(APPEND external_project_dependencies lib_any) diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index fbb2ac3fe8..424b8f0542 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -36,7 +36,7 @@ cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info) nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce) -cc_library(place SRCS place.cc DEPS enforce boost lib_any) +cc_library(place SRCS place.cc DEPS enforce boost) cc_test(place_test SRCS place_test.cc DEPS place glog gflags) add_subdirectory(dynload) From 60cb0b9781437b0864348f05d0a84a4e3f1feab7 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Wed, 20 Feb 2019 11:49:35 +0800 Subject: [PATCH 087/117] remove legacy $external_project_dependencies variable test=develop --- cmake/external/anakin.cmake | 2 -- cmake/external/boost.cmake | 1 - cmake/external/brpc.cmake | 2 -- cmake/external/cub.cmake | 2 -- cmake/external/dlpack.cmake | 2 -- cmake/external/eigen.cmake | 2 -- cmake/external/gflags.cmake | 2 -- cmake/external/glog.cmake | 2 -- cmake/external/gtest.cmake | 1 - cmake/external/leveldb.cmake | 3 --- cmake/external/libmct.cmake | 3 --- cmake/external/libxsmm.cmake | 2 -- cmake/external/mkldnn.cmake | 1 - cmake/external/mklml.cmake | 1 - cmake/external/ngraph.cmake | 1 - cmake/external/openblas.cmake | 1 - cmake/external/protobuf.cmake | 1 - cmake/external/pslib.cmake | 1 - cmake/external/pslib_brpc.cmake | 1 - 
cmake/external/threadpool.cmake | 2 -- cmake/external/warpctc.cmake | 2 -- cmake/external/xbyak.cmake | 1 - cmake/external/xxhash.cmake | 2 -- cmake/external/zlib.cmake | 2 -- python/CMakeLists.txt | 19 +++---------------- 25 files changed, 3 insertions(+), 56 deletions(-) diff --git a/cmake/external/anakin.cmake b/cmake/external/anakin.cmake index 06fc6061bc..77f4b34537 100644 --- a/cmake/external/anakin.cmake +++ b/cmake/external/anakin.cmake @@ -74,5 +74,3 @@ add_dependencies(anakin_shared extern_anakin) add_library(anakin_saber SHARED IMPORTED GLOBAL) set_property(TARGET anakin_saber PROPERTY IMPORTED_LOCATION ${ANAKIN_SABER_LIB}) add_dependencies(anakin_saber extern_anakin) - -list(APPEND external_project_dependencies anakin_shared anakin_saber) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index 12412a51a0..fc204dc919 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -57,5 +57,4 @@ else() endif() add_dependencies(boost ${BOOST_PROJECT}) -list(APPEND external_project_dependencies boost) set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR}) diff --git a/cmake/external/brpc.cmake b/cmake/external/brpc.cmake index 6b50cff7a6..989d1dbd4c 100644 --- a/cmake/external/brpc.cmake +++ b/cmake/external/brpc.cmake @@ -69,5 +69,3 @@ SET_PROPERTY(TARGET brpc PROPERTY IMPORTED_LOCATION ${BRPC_LIBRARIES}) ADD_DEPENDENCIES(brpc extern_brpc) add_definitions(-DBRPC_WITH_GLOG) - -LIST(APPEND external_project_dependencies brpc) diff --git a/cmake/external/cub.cmake b/cmake/external/cub.cmake index f06728de91..41ad820774 100644 --- a/cmake/external/cub.cmake +++ b/cmake/external/cub.cmake @@ -31,5 +31,3 @@ else() endif() add_dependencies(cub extern_cub) - -LIST(APPEND external_project_dependencies cub) diff --git a/cmake/external/dlpack.cmake b/cmake/external/dlpack.cmake index 4587475d79..63dd16b28e 100644 --- a/cmake/external/dlpack.cmake +++ b/cmake/external/dlpack.cmake @@ -27,5 +27,3 @@ else() endif() add_dependencies(dlpack extern_dlpack) - -LIST(APPEND external_project_dependencies dlpack) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index 6aef97f212..72441160f8 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -52,5 +52,3 @@ else() endif() add_dependencies(eigen3 extern_eigen3) - -LIST(APPEND external_project_dependencies eigen3) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index f3ca74faea..911920ed62 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -61,8 +61,6 @@ ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES}) ADD_DEPENDENCIES(gflags extern_gflags) -LIST(APPEND external_project_dependencies gflags) - # On Windows (including MinGW), the Shlwapi library is used by gflags if available. 
if (WIN32) include(CheckIncludeFileCXX) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index d3a4d69d3a..7fa17ce6b7 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -72,5 +72,3 @@ ADD_LIBRARY(glog STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARIES}) ADD_DEPENDENCIES(glog extern_glog gflags) LINK_LIBRARIES(glog gflags) - -LIST(APPEND external_project_dependencies glog) diff --git a/cmake/external/gtest.cmake b/cmake/external/gtest.cmake index 9be625b620..e459526583 100644 --- a/cmake/external/gtest.cmake +++ b/cmake/external/gtest.cmake @@ -79,5 +79,4 @@ IF(WITH_TESTING OR (WITH_DISTRIBUTE AND NOT WITH_GRPC)) SET_PROPERTY(TARGET gtest_main PROPERTY IMPORTED_LOCATION ${GTEST_MAIN_LIBRARIES}) ADD_DEPENDENCIES(gtest_main extern_gtest) - LIST(APPEND external_project_dependencies gtest gtest_main) ENDIF(WITH_TESTING OR (WITH_DISTRIBUTE AND NOT WITH_GRPC)) diff --git a/cmake/external/leveldb.cmake b/cmake/external/leveldb.cmake index 0df61b01ab..ac0febd076 100644 --- a/cmake/external/leveldb.cmake +++ b/cmake/external/leveldb.cmake @@ -39,6 +39,3 @@ ADD_DEPENDENCIES(extern_leveldb snappy) ADD_LIBRARY(leveldb STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET leveldb PROPERTY IMPORTED_LOCATION ${LEVELDB_LIBRARIES}) ADD_DEPENDENCIES(leveldb extern_leveldb) - -LIST(APPEND external_project_dependencies leveldb) - diff --git a/cmake/external/libmct.cmake b/cmake/external/libmct.cmake index 27cff8cfb6..b944f2945b 100644 --- a/cmake/external/libmct.cmake +++ b/cmake/external/libmct.cmake @@ -72,7 +72,4 @@ else() add_library(libmct INTERFACE) endif() -#ADD_LIBRARY(libmct SHARED IMPORTED GLOBAL) ADD_DEPENDENCIES(libmct ${LIBMCT_PROJECT}) -LIST(APPEND external_project_dependencies libmct) - diff --git a/cmake/external/libxsmm.cmake b/cmake/external/libxsmm.cmake index 39f49d210a..69cdba7c59 100644 --- a/cmake/external/libxsmm.cmake +++ b/cmake/external/libxsmm.cmake @@ -53,5 +53,3 @@ MESSAGE(STATUS "Libxsmm library: ${LIBXSMM_LIBS}") include_directories(${LIBXSMM_INCLUDE_DIR}) ADD_DEFINITIONS(-DPADDLE_WITH_LIBXSMM) ADD_DEPENDENCIES(libxsmm extern_libxsmm) -LIST(APPEND external_project_dependencies libxsmm) - diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index 92fe76d05c..94a266c501 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -89,7 +89,6 @@ SET_PROPERTY(TARGET shared_mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB}) ADD_DEPENDENCIES(shared_mkldnn ${MKLDNN_PROJECT}) MESSAGE(STATUS "MKLDNN library: ${MKLDNN_LIB}") add_definitions(-DPADDLE_WITH_MKLDNN) -LIST(APPEND external_project_dependencies shared_mkldnn) # generate a static dummy target to track mkldnn dependencies # for cc_library(xxx SRCS xxx.c DEPS mkldnn) diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 2caff27357..54826cedb8 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -73,4 +73,3 @@ INCLUDE_DIRECTORIES(${MKLML_INC_DIR}) ADD_LIBRARY(mklml SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET mklml PROPERTY IMPORTED_LOCATION ${MKLML_LIB}) ADD_DEPENDENCIES(mklml ${MKLML_PROJECT}) -LIST(APPEND external_project_dependencies mklml) diff --git a/cmake/external/ngraph.cmake b/cmake/external/ngraph.cmake index 14af98b2d7..5812a61f0d 100644 --- a/cmake/external/ngraph.cmake +++ b/cmake/external/ngraph.cmake @@ -77,4 +77,3 @@ add_dependencies(ngraph ${NGRAPH_PROJECT}) target_compile_definitions(ngraph INTERFACE -DPADDLE_WITH_NGRAPH) target_include_directories(ngraph INTERFACE 
${NGRAPH_INC_DIR}) target_link_libraries(ngraph INTERFACE ${NGRAPH_SHARED_LIB}) -LIST(APPEND external_project_dependencies ngraph) diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index b347a59292..fdc7f48574 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -91,7 +91,6 @@ ENDIF() IF(NOT ${CBLAS_FOUND}) ADD_DEPENDENCIES(cblas extern_openblas) - LIST(APPEND external_project_dependencies cblas) ELSE() IF("${CBLAS_PROVIDER}" STREQUAL "MKLML") ADD_DEPENDENCIES(cblas mklml) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 3da3f10d7c..c2511d43e3 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -129,7 +129,6 @@ macro(PROMPT_PROTOBUF_LIB) ADD_DEPENDENCIES(protoc ${dep}) ENDFOREACH() - LIST(APPEND external_project_dependencies protobuf) RETURN() endmacro() macro(SET_PROTOBUF_VERSION) diff --git a/cmake/external/pslib.cmake b/cmake/external/pslib.cmake index b4ea268e5a..0287e5cf2a 100644 --- a/cmake/external/pslib.cmake +++ b/cmake/external/pslib.cmake @@ -70,4 +70,3 @@ ExternalProject_Add( ADD_LIBRARY(pslib SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET pslib PROPERTY IMPORTED_LOCATION ${PSLIB_LIB}) ADD_DEPENDENCIES(pslib ${PSLIB_PROJECT}) -LIST(APPEND external_project_dependencies pslib) diff --git a/cmake/external/pslib_brpc.cmake b/cmake/external/pslib_brpc.cmake index 8b43f2ef5c..22c8c1b463 100644 --- a/cmake/external/pslib_brpc.cmake +++ b/cmake/external/pslib_brpc.cmake @@ -70,4 +70,3 @@ ExternalProject_Add( ADD_LIBRARY(pslib_brpc SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET pslib_brpc PROPERTY IMPORTED_LOCATION ${PSLIB_BRPC_LIB}) ADD_DEPENDENCIES(pslib_brpc ${PSLIB_BRPC_PROJECT}) -LIST(APPEND external_project_dependencies pslib_brpc) diff --git a/cmake/external/threadpool.cmake b/cmake/external/threadpool.cmake index 0159815fed..1f56bc7ab0 100644 --- a/cmake/external/threadpool.cmake +++ b/cmake/external/threadpool.cmake @@ -26,5 +26,3 @@ else() endif() add_dependencies(simple_threadpool extern_threadpool) - -LIST(APPEND external_project_dependencies simple_threadpool) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 7a25aaf15f..6f2af8670f 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -83,5 +83,3 @@ INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install) # For Paddle code to include wa ADD_LIBRARY(warpctc SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET warpctc PROPERTY IMPORTED_LOCATION ${WARPCTC_LIBRARIES}) ADD_DEPENDENCIES(warpctc extern_warpctc) - -LIST(APPEND external_project_dependencies warpctc) diff --git a/cmake/external/xbyak.cmake b/cmake/external/xbyak.cmake index 384c2f9328..1d61154c0d 100644 --- a/cmake/external/xbyak.cmake +++ b/cmake/external/xbyak.cmake @@ -55,4 +55,3 @@ else() endif() add_dependencies(xbyak ${XBYAK_PROJECT}) -list(APPEND external_project_dependencies xbyak) diff --git a/cmake/external/xxhash.cmake b/cmake/external/xxhash.cmake index a0f300c2e8..23b1e02108 100644 --- a/cmake/external/xxhash.cmake +++ b/cmake/external/xxhash.cmake @@ -71,5 +71,3 @@ add_library(xxhash STATIC IMPORTED GLOBAL) set_property(TARGET xxhash PROPERTY IMPORTED_LOCATION ${XXHASH_LIBRARIES}) include_directories(${XXHASH_INCLUDE_DIR}) add_dependencies(xxhash extern_xxhash) - -LIST(APPEND external_project_dependencies xxhash) diff --git a/cmake/external/zlib.cmake b/cmake/external/zlib.cmake index 6c8d79c25e..5569fefe99 100644 --- a/cmake/external/zlib.cmake +++ b/cmake/external/zlib.cmake @@ -57,5 +57,3 @@ ENDIF(WIN32) 
ADD_LIBRARY(zlib STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET zlib PROPERTY IMPORTED_LOCATION ${ZLIB_LIBRARIES}) ADD_DEPENDENCIES(zlib extern_zlib) - -LIST(APPEND external_project_dependencies zlib) diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index bcc997ff45..81c34beeef 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -4,18 +4,6 @@ set(PY_FILES paddle/__init__.py ${UTILS_PY_FILES} ${FLUID_PY_FILES}) -set(MKL_SHARED_LIBS "") -set(MKL_DEPENDS "") -if(WITH_MKLML) - list(APPEND MKL_SHARED_LIBS ${MKLML_LIB} ${MKLML_IOMP_LIB}) - list(APPEND MKL_DEPENDS mklml) -endif() - -if(WITH_MKLDNN) - list(APPEND MKL_SHARED_LIBS "${MKLDNN_SHARED_LIB}") - list(APPEND MKL_DEPENDS mkldnn mkldnn_shared_lib) -endif() - if(WITH_GPU) SET(PACKAGE_NAME "paddlepaddle-gpu") else() @@ -42,7 +30,7 @@ IF(WIN32) COMMAND ${CMAKE_COMMAND} -E env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E remove_directory ${PADDLE_PYTHON_BUILD_DIR}/lib-python - DEPENDS copy_paddle_pybind ${FLUID_CORE} framework_py_proto profiler_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) + DEPENDS copy_paddle_pybind ${FLUID_CORE} framework_py_proto profiler_py_proto ${PY_FILES}) ELSE(WIN32) add_custom_command(OUTPUT ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND touch stub.cc @@ -51,11 +39,10 @@ ELSE(WIN32) COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_PYTHON_BUILD_DIR}/.timestamp COMMAND ${CMAKE_COMMAND} -E remove_directory ${PADDLE_PYTHON_BUILD_DIR}/lib-python COMMAND ${CMAKE_COMMAND} -E copy_directory ${PADDLE_PYTHON_BUILD_DIR}/lib* ${PADDLE_PYTHON_BUILD_DIR}/lib-python - DEPENDS copy_paddle_pybind ${FLUID_CORE} framework_py_proto profiler_py_proto ${PY_FILES} ${external_project_dependencies} ${COPY_PADDLE_MASTER}) + DEPENDS copy_paddle_pybind ${FLUID_CORE} framework_py_proto profiler_py_proto ${PY_FILES}) ENDIF() -set(paddle_python_deps ${PADDLE_PYTHON_BUILD_DIR}/.timestamp ${MKL_DEPENDS} ${external_project_dependencies}) -add_custom_target(paddle_python ALL DEPENDS ${paddle_python_deps}) +add_custom_target(paddle_python ALL DEPENDS ${PADDLE_PYTHON_BUILD_DIR}/.timestamp) set(PADDLE_PYTHON_PACKAGE_DIR ${CMAKE_CURRENT_BINARY_DIR}/dist/) From d331e97af85f4ef188edf52535bb04d0ecf26138 Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Wed, 20 Feb 2019 11:08:38 +0800 Subject: [PATCH 088/117] fix compiler place compare test=develop --- paddle/fluid/pybind/pybind.cc | 29 ++++++++++++++++++++++++++++- python/paddle/fluid/compiler.py | 2 +- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index c50c38160e..d8e57a1ac6 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -106,6 +106,11 @@ bool IsCompiledWithDIST() { #endif } +template +static inline bool IsSamePlace(const PlaceType1 &p1, const PlaceType2 &p2) { + return paddle::platform::Place(p1) == paddle::platform::Place(p2); +} + PYBIND11_MODULE(core, m) { // Not used, just make sure cpu_info.cc is linked. paddle::platform::CpuTotalPhysicalMemory(); @@ -732,23 +737,45 @@ All parameter, weight, gradient are variables in Paddle. 
PADDLE_THROW("Cannot use CUDAPlace in CPU only version"); #endif }) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", + &IsSamePlace) .def("__str__", string::to_string); py::class_(m, "CPUPlace") .def(py::init<>()) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", + &IsSamePlace) .def("__str__", string::to_string); py::class_(m, "CUDAPinnedPlace") .def("__init__", - [](platform::CUDAPinnedPlace &) { + [](platform::CUDAPinnedPlace &self) { #ifndef PADDLE_WITH_CUDA PADDLE_THROW("Cannot use CUDAPinnedPlace in CPU only version"); #endif + new (&self) platform::CUDAPinnedPlace(); }) + .def("_equals", &IsSamePlace) + .def("_equals", + &IsSamePlace) + .def("_equals", + &IsSamePlace) + .def("_equals", + &IsSamePlace) .def("__str__", string::to_string); py::class_(m, "Place") .def(py::init<>()) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) + .def("_equals", &IsSamePlace) .def("is_gpu_place", [](platform::Place &self) { return platform::is_gpu_place(self); }) .def("gpu_device_id", diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index b24cec044f..0fecff81cf 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -220,7 +220,7 @@ class CompiledProgram(object): if self._compiled: if scope and self._scope != scope: raise ValueError("Cannot compile with different scope") - if place and self._place != place: + if place and not self._place._equals(place): raise ValueError("Cannot compile with different place") return self self._compiled = True From f1df9dba24309e87e91c9e03dda7d94e650c0e15 Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Wed, 20 Feb 2019 13:35:59 +0800 Subject: [PATCH 089/117] test=develop, update fluid.layers to LaryerHelper (#15797) --- .../unittests/test_imperative_ptb_rnn.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 82aff18b72..7cf3bf13d2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -40,6 +40,8 @@ class SimpleLSTMRNN(fluid.imperative.Layer): self._dropout = dropout self._input = None self._num_steps = num_steps + from paddle.fluid.layer_helper import LayerHelper + self._helper = LayerHelper('SimpleLSTMRNN', act="tanh") def _build_once(self, input_embedding, init_hidden=None, init_cell=None): self.weight_1_arr = [] @@ -50,17 +52,21 @@ class SimpleLSTMRNN(fluid.imperative.Layer): self.mask_array = [] for i in range(self._num_layers): - weight_1 = fluid.layers.create_parameter( + weight_1 = self._helper.create_parameter( + attr=fluid.ParamAttr( + initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)), shape=[self._hidden_size * 2, self._hidden_size * 4], dtype="float32", - name="fc_weight1_" + str(i), default_initializer=fluid.initializer.UniformInitializer( low=-self._init_scale, high=self._init_scale)) self.weight_1_arr.append(weight_1) - bias_1 = fluid.layers.create_parameter( - [self._hidden_size * 4], + bias_1 = self._helper.create_parameter( + attr=fluid.ParamAttr( + initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)), + shape=[self._hidden_size * 4], dtype="float32", - 
name="fc_bias1_" + str(i), default_initializer=fluid.initializer.Constant(0.0)) self.bias_arr.append(bias_1) @@ -137,6 +143,8 @@ class PtbModel(fluid.imperative.Layer): self.num_layers = num_layers self.num_steps = num_steps self.dropout = dropout + from paddle.fluid.layer_helper import LayerHelper + self._helper = LayerHelper('PtbModel', act="tanh") self.simple_lstm_rnn = SimpleLSTMRNN( hidden_size, num_steps, @@ -151,16 +159,16 @@ class PtbModel(fluid.imperative.Layer): name='embedding_para', initializer=fluid.initializer.UniformInitializer( low=-init_scale, high=init_scale))) - self.softmax_weight = fluid.layers.create_parameter( - [self.hidden_size, self.vocab_size], + self.softmax_weight = self._helper.create_parameter( + attr=fluid.ParamAttr(), + shape=[self.hidden_size, self.vocab_size], dtype="float32", - name="softmax_weight", default_initializer=fluid.initializer.UniformInitializer( low=-self.init_scale, high=self.init_scale)) - self.softmax_bias = fluid.layers.create_parameter( - [self.vocab_size], + self.softmax_bias = self._helper.create_parameter( + attr=fluid.ParamAttr(), + shape=[self.vocab_size], dtype="float32", - name='softmax_bias', default_initializer=fluid.initializer.UniformInitializer( low=-self.init_scale, high=self.init_scale)) @@ -256,7 +264,6 @@ class TestImperativePtbRnn(unittest.TestCase): with new_program_scope(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - # TODO: marsyang1993 Change seed to ptb_model = PtbModel( hidden_size=hidden_size, vocab_size=vocab_size, From 4711d88a2f763aa1922302806b84b96d0ba7a70c Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Wed, 20 Feb 2019 08:19:01 +0000 Subject: [PATCH 090/117] fix nms unittest in py36, test=develop --- .../paddle/fluid/tests/unittests/test_multiclass_nms_op.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index 8fc391a1ff..69e060341e 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -173,13 +173,16 @@ def lod_multiclass_nms(boxes, scores, background, score_threshold, normalized, shared=False) if nmsed_num == 0: - #lod.append(1) continue lod.append(nmsed_num) + tmp_det_out = [] for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = box[idx, c, :] - det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax]) + tmp_det_out.append([c, score[idx][c], xmin, ymin, xmax, ymax]) + sorted_det_out = sorted( + tmp_det_out, key=lambda tup: tup[0], reverse=False) + det_outs.extend(sorted_det_out) if len(lod) == 0: lod.append(1) From 8b40f2d40e318c36cd4c0a4433453970d42544ee Mon Sep 17 00:00:00 2001 From: Jiabin Yang Date: Wed, 20 Feb 2019 18:37:05 +0800 Subject: [PATCH 091/117] Feature/fast install 1.4 (#15668) * update fast install shell * test=develop, enhance mac fast install * fix pip Failure due to too low version;Add python virtualenv * test=develop * test=develop * test=develop * test=develop * test=develop --- paddle/scripts/fast_install.sh | 669 +++++++++++++++++++++------------ 1 file changed, 436 insertions(+), 233 deletions(-) diff --git a/paddle/scripts/fast_install.sh b/paddle/scripts/fast_install.sh index b960d0f00a..0461944ca8 100644 --- a/paddle/scripts/fast_install.sh +++ b/paddle/scripts/fast_install.sh @@ -1,5 +1,37 @@ #!/bin/bash +## purple to echo +function purple(){ + echo -e 
"\033[35m$1\033[0m" +} + + +## green to echo +function green(){ + echo -e "\033[32m$1\033[0m" +} + +## Error to warning with blink +function bred(){ + echo -e "\033[31m\033[01m\033[05m$1\033[0m" +} + +## Error to warning with blink +function byellow(){ + echo -e "\033[33m\033[01m\033[05m$1\033[0m" +} + + +## Error +function red(){ + echo -e "\033[31m\033[01m$1\033[0m" +} + +## warning +function yellow(){ + echo -e "\033[33m\033[01m$1\033[0m" +} + path='http://paddlepaddle.org/download?url=' #release_version=`curl -s https://pypi.org/project/paddlepaddle/|grep -E "/project/paddlepaddle/"|grep "release"|awk -F '/' '{print $(NF-1)}'|head -1` release_version=1.2.0 @@ -228,36 +260,128 @@ function checkLinuxPaddleVersion(){ done } -function checkLinuxPip(){ +function checkPythonVirtualenv(){ while true do - echo "请输入您要使用的pip目录(您可以另起终端,并使用which pip来查看):" - read -p "" pip_path - if [ "$pip_path" == "" -o ! -f "$pip_path" ];then - echo "检测结果:pip不存在,请重新输入" - continue - fi - python_version=`$pip_path --version|awk -F "[ |)]" '{print $6}'|sed 's#\.##g'` - if [ "$python_version" == "27" ];then - uncode=`python -c "import pip._internal;print(pip._internal.pep425tags.get_supported())"|grep "cp27mu"` - if [[ "$uncode" == "" ]];then - uncode= - else - uncode=u - fi - fi - if [ "$python_version" == "" ];then - echo "检测结果:pip不存在,请重新输入" - else - version_list=`echo "${python_list[@]}" | grep "$python_version" ` - if [ "$version_list" != "" ];then - echo "检测结果:找到python${python_version}版本" - break - else - echo "检测结果:找不到可用的 pip, 我们只支持Python27/35/36/37及其对应的pip, 请重新输入, 或使用ctrl + c退出 " - fi - fi + read -p " + 是否使用python virtualenv虚环境安装(y/n)": check_virtualenv + case $check_virtualenv in + y) + echo "为您使用python虚环境安装" + ;; + n) + break + ;; + *) + continue + ;; + esac + + virtualenv_path=`which virtualenv 2>&1` + if [ "$virtualenv_path" == "" ];then + $python_path -m pip install virtualenv + if [ "$?" != '0' ];then + echo "安装虚拟环境失败,请检查本地环境" + fi + fi + + while true + do + read -p "请输入虚拟环境名字:" virtualenv_name + if [ "$virtualenv_name" == "" ];then + echo "不能为空" + continue + fi + break + done + + virtualenv -p $python_path ${virtualenv_name} + if [ "$?" != 0 ];then + echo "创建虚环境失败,请检查环境" + exit 2 + fi + cd ${virtualenv_name} + source ./bin/activate + + if [ "$?" == 0 ];then + use_virtualenv= + python_path=`which python` + break + else + echo "创建虚环境失败,请检查环境" + exit 2 + fi + done +} + +function checkLinuxPython(){ + python_path=`which python 2>/dev/null` + while true + do + if [ "$python_path" == '' ];then + while true + do + read -p "没有找到默认的python版本,请输入要安装的python路径:" python_path + python_path=`$python_path -V` + if [ "$python_path" != "" ];then + break + else + echo "输入路径有误,未找到pyrhon" + fi done + fi + + python_version=`$python_path -V 2>&1|awk -F '[ .]' '{print $2$3}'` + pip_version=`$python_path -m pip -V|awk -F '[ .]' '{print $2}'` + while true + do + read -p " + 找到python版本$python_version,使用请输入y,选择其他版本请输n(y/n):" check_python + case $check_python in + n) + read -p "请指定您的python路径:" new_python_path + python_V=`$new_python_path -V 2>/dev/null` + if [ "$python_V" != "" ];then + python_path=$new_python_path + python_version=`$python_path -V 2>&1|awk -F '[ .]' '{print $2$3}'` + pip_version=`python -m pip -V|awk -F '[ .]' '{print $2}'` + echo "您的python版本为${python_version}" + break + else + echo 输入有误,未找到python路径 + fi + ;; + y) + break + ;; + *) + echo "输入有误,请重新输入." 
+ continue + ;; + esac + done + + if [ "$pip_version" -lt 9 ];then + echo "您的pip版本小于9.0.1 请升级pip (pip install --upgrade pip)" + exit 0 + fi + + if [ "$python_version" == "27" ];then + uncode=`python -c "import pip._internal;print(pip._internal.pep425tags.get_supported())"|grep "cp27mu"` + if [[ "$uncode" == "" ]];then + uncode= + else + uncode=u + fi + fi + + version_list=`echo "${python_list[@]}" | grep "$python_version" ` + if [ "$version_list" == "" ];then + echo "找不到可用的 pip, 我们只支持Python27/35/36/37及其对应的pip, 请重新输入, 或使用ctrl + c退出 " + else + break + fi + done } function checkLinuxAVX(){ @@ -287,25 +411,36 @@ function PipLinuxInstall(){ wheel_cpu_develop="http://paddle-wheel.bj.bcebos.com/latest-cpu-${AVX}-${math}/paddlepaddle-latest-cp${python_version}-cp${python_version}m${uncode}-linux_x86_64.whl" wheel_gpu_develop="http://paddle-wheel.bj.bcebos.com/latest-gpu-cuda${CUDA}-cudnn${CUDNN}-${AVX}-${math}/paddlepaddle_gpu-latest-cp${python_version}-cp${python_version}m${uncode}-linux_x86_64.whl" - if [[ "$paddle_version" == "2" ]];then if [[ "$GPU" == "gpu" ]];then if [[ ${AVX} == "avx" ]];then rm -rf `echo $wheel_gpu_release|awk -F '/' '{print $NF}'` wget -q $wheel_gpu_release if [ "$?" == "0" ];then - $pip_path install --user -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_release + $python_path -m pip install ${use_virtualenv} -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_release + if [ "$?" == 0 ];then + echo 安装成功 + else + echo 安装失败 + exit 1 + fi else - echo "paddlepaddle whl包下载失败" + echo paddlepaddle whl包下载失败 exit 1 fi else rm -rf `echo $wheel_gpu_release_novax|awk -F '/' '{print $NF}'` wget -q $wheel_gpu_release_novax if [ "$?" == "0" ];then - $pip_path install --user -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_release_noavx + $python_path -m pip install ${use_virtualenv} -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_release_noavx + if [ "$?" == 0 ];then + echo 安装成功 + else + echo 安装失败 + exit 1 + fi else - echo "paddlepaddle whl包下载失败" + echo paddlepaddle whl包下载失败 exit 1 fi fi @@ -313,9 +448,15 @@ function PipLinuxInstall(){ rm -rf `echo $wheel_cpu_release|awk -F '/' '{print $NF}'` wget -q $wheel_cpu_release if [ "$?" == "0" ];then - $pip_path install --user -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_cpu_release + $python_path -m pip install ${use_virtualenv} -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_cpu_release + if [ "$?" == 0 ];then + echo 安装成功 + else + echo 安装失败 + exit 1 + fi else - echo "paddlepaddle whl包下载失败" + echo paddlepaddle whl包下载失败 exit 1 fi fi @@ -324,18 +465,30 @@ function PipLinuxInstall(){ rm -rf `echo $wheel_gpu_develop|awk -F '/' '{print $NF}'` wget -q $wheel_gpu_develop if [ "$?" == "0" ];then - $pip_path install --user -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_develop + $python_path -m pip install ${use_virtualenv} -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_gpu_develop + if [ "$?" == 0 ];then + echo 安装成功 + else + echo 安装失败 + exit 1 + fi else - echo "paddlepaddle whl包下载失败" + echo paddlepaddle whl包下载失败 exit 1 fi else rm -rf `echo $wheel_cpu_develop|awk -F '/' '{print $NF}'` wget -q $wheel_cpu_develop if [ "$?" 
== "0" ];then - $pip_path install --user -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_cpu_develop + $python_path -m pip install ${use_virtualenv} -i https://mirrors.aliyun.com/pypi/simple --trusted-host=mirrors.aliyun.com $wheel_cpu_develop + if [ "$?" == 0 ];then + echo 安装成功 + else + echo 安装失败 + exit 1 + fi else - echo "paddlepaddle whl包下载失败" + echo paddlepaddle whl包下载失败 exit 1 fi fi @@ -575,95 +728,122 @@ gpu_list=( echo echo "Step 5. 检测pip版本" echo - checkLinuxPip + checkLinuxPython echo checkLinuxAVX + echo + echo "Step 6.是否使用Python的虚拟环境" + use_virtualenv="--user" + checkPythonVirtualenv echo "*********************2. 开始安装*****************************" PipLinuxInstall + if [ "$check_virtualenv" == 'y' ];then + echo "虚环境创建成功,请cd 进入${virtualenv_name}, 执行 source bin/activate 进入虚环境。退出虚环境执行 deactivate命令。 + 更多虚环境使用方法请参考virtualenv官网:https://virtualenv.pypa.io/en/latest/" + fi +} + +function clearMacPythonEnv(){ + python_version="" + python_brief_version="" + python_root="" } function checkMacPython2(){ while true do - read -p " - => 未能在常规路径下找到Python2,请使用ctrl+c命令退出安装程序,并使用brew或pypi.org下载安装Python2(注意Python版本不能低于2.7.15) - 如希望自定义Python路径,请输入路径:" python_root - echo python_version=`$python_root --version 2>&1 1>&1` - if [ $? == "0" ];then - : + if [[ $? == "0" ]];then + if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ];then + clearMacPythonEnv + else + check_python=`echo $python_version | grep "Python 2"` + if [[ -n "$check_python" ]];then + while true + do + echo -e " => 在您的环境中找到 \033[32m[ $python_version ]\033[0m, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车: " + read -p "" use_python + echo + use_python=`echo $use_python | tr 'A-Z' 'a-z'` + if [[ "$use_python" == "y" ]]||[[ "$use_python" == "" ]];then + use_python="y" + break + elif [[ "$use_python" == "n" ]];then + clearMacPythonEnv + break + else + red " 输入错误,请重新输入(y/n)" + fi + done + if [[ "$use_python" == "y" ]];then + return 0 + fi + else + red " 您输入Python的不是Python2" + clearMacPythonEnv + fi + fi else - python_version="" + clearMacPythonEnv + red " => 未能在常规路径下找到可用的Python2,请使用ctrl+c命令退出安装程序,并使用brew或pypi.org下载安装Python2(注意Python版本不能低于2.7.15)" + read -p " 如希望自定义Python路径,请输入路径 + 如果希望重新选择Python版本,请回车:" python_root + echo + if [[ "$python_root" == "" ]];then + python_V="" + clearMacPythonEnv + return 1 + fi fi - check_python=`echo $python_version | grep "Python 2"` - if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ] ;then - python_version="" - elif [ -n "$check_python" ];then - while true - do - read -p " - => 在您的环境中找到 $python_version, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车: " use_python - echo - use_python=`echo $use_python | tr 'A-Z' 'a-z'` - if [ "$use_python" == "y" ]||[ "$use_python" == "" ];then - use_python="y" - break - elif [ "$use_python" == "n" ];then - python_root="" - break - else - echo "输入错误,请重新输入(y/n)" - fi - done - if [ "$use_python" == "y" ];then - break - fi - else - echo "您输入Python的不是Python2" - python_version="" - fi done } function checkMacPython3(){ while true do - read -p " - => 未能在常规路径下找到Python3,请使用ctrl+c命令退出安装程序,并使用brew或pypi.org下载Python3 - 如希望自定义Python路径,请输入路径:" python_root - python_version=`$python_root --version 2>&1 1>&1` - if [ $? == "0" ];then - : + python_version=`$python_root --version 2>&1 1>&1` + if [[ $? 
== "0" ]];then + if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ] ;then + clearMacPythonEnv + else + check_python=`echo $python_version | grep "Python 3"` + if [[ -n "$check_python" ]];then + while true + do + echo -e " => 在您的环境中找到 \033[32m[ $python_version ]\033[0m, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车: " + read -p "" use_python + echo + use_python=`echo $use_python | tr 'A-Z' 'a-z'` + if [[ "$use_python" == "y" ]]||[[ "$use_python" == "" ]];then + use_python="y" + break + elif [[ "$use_python" == "n" ]];then + clearMacPythonEnv + break + else + red " 输入错误,请重新输入(y/n)" + fi + done + if [[ "$use_python" == "y" ]];then + return 0 + fi + else + red " 您输入Python的不是Python3" + clearMacPythonEnv + fi + fi else - python_version="" + clearMacPythonEnv + red " => 未能在常规路径下找到可用的Python3,请使用ctrl+c命令退出安装程序,并使用brew或pypi.org下载安装Python3(注意Python版本不能低于3.5.x)" + read -p " 如希望自定义Python路径,请输入路径 + 如果希望重新选择Python版本,请回车:" python_root + echo + if [[ "$python_root" == "" ]];then + python_V="" + clearMacPythonEnv + return 1 + fi fi - check_python=`echo $python_version | grep "Python 3"` - if [ "$python_version" == "" ] || [ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ] ;then - python_version="" - elif [ -n "$check_python" ] ;then - while true - do - read -p " - => 在您的环境中找到 $python_version, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车: " use_python - echo - use_python=`echo $use_python | tr 'A-Z' 'a-z'` - if [ "$use_python" == "y" ]||[ "$use_python" == "" ];then - use_python="y" - break - elif [ "$use_python" == "n" ];then - python_root="" - break - else - echo "输入错误,请重新输入(y/n)" - fi - done - if [ "$use_python" == "y" ];then - break - fi - else - echo "您输入Python的不是Python3" - python_version="" - fi done } @@ -672,145 +852,160 @@ function checkMacPaddleVersion(){ do read -n1 -p "Step 2. 选择PaddlePaddle的版本,请按回车键继续..." echo - read -p " - 1. 开发版:对应Github上develop分支,如您需要开发、或希望使用PaddlePaddle最新功能,请选用此版本 - 2. 稳定版(推荐):如您无特殊开发需求,建议使用此版本,目前最新的版本号为 ${release_version} - - => 请输入数字1或2。如输入其他字符或直接回车,将会默认选择【 2. 稳定版 】 。请在这里输入并回车:" paddle_version - if [ "$paddle_version" == "1" ]||[ "$paddle_version" == "2" ];then + yellow " 1. 开发版:对应Github上develop分支,如您需要开发、或希望使用PaddlePaddle最新功能,请选用此版本" + yellow " 2. 稳定版(推荐):如您无特殊开发需求,建议使用此版本,目前最新的版本号为 ${release_version}" + read -p " => 请输入数字1或2。如输入其他字符或直接回车,将会默认选择【 2. 稳定版 】 。请在这里输入并回车:" paddle_version + if [[ "$paddle_version" == "1" ]]||[[ "$paddle_version" == "2" ]];then echo - echo "您选择了数字【"$paddle_version" 】" + yellow " 您选择了数字【"$paddle_version" 】" echo break else paddle_version="2" echo - echo "您选择了数字【2】" + yellow " 您选择了数字【2】" echo break fi done } +function initCheckMacPython2(){ + echo + yellow " 您选择了Python "$python_V",正在寻找符合要求的Python 2版本" + echo + python_root=`which python2.7` + if [[ "$python_root" == "" ]];then + python_root=`which python` + fi + checkMacPython2 + if [[ "$?" == "1" ]];then + return 1 + else + return 0 + fi +} -function checkMacPythonVersion(){ - while true - do - read -n1 -p "Step 3. 选择Python版本,请按回车键继续..." - read -p " - 2. 使用python 2.x - 3. 使用python 3.x +function initCheckMacPython3(){ + echo + yellow " 您选择了Python "$python_V",正在寻找符合您要求的Python 2版本" + echo + python_root=`which python3` + checkMacPython3 + if [[ "$?" 
== "1" ]];then + return 1 + else + return 0 + fi +} - => 请输入数字2或3。如输入其他字符或直接回车,将会默认使用【Python 2 】。请在这里输入并回车:" python_V - echo - if [ "$python_V" == "" ];then - python_V="2" +function checkMacPip(){ + if [[ "$python_V" == "2" ]]||[[ "$python_V" == "3" ]];then + + python_brief_version=`$python_root -m pip -V |awk -F "[ |)]" '{print $6}'|sed 's#\.##g'` + if [[ ${python_brief_version} == "" ]];then + red "您输入的python:${python_root} 对应的pip不可用,请检查此pip或重新选择其他python" + echo + return 1 fi - echo "您选择了数字【"$python_V"】,正在寻找符合您要求的Python版本,请按回车键继续..." - echo - if [ "$python_V" == "2" ];then - python_root=`which python2.7` - if [ "$python_root" == "" ];then - python_root=`which python` - fi - python_version=`$python_root --version 2>&1 1>&1` - if [ $? == "0" ];then - : - else - python_version="" - fi - if [ "$python_root" == "" ]||[ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ]||[ "$python_root" == "/usr/bin/python2.7" -a "$python_version" == "Python 2.7.10" ];then - checkMacPython2 - fi - while true - do - read -p " - => 在您的环境中找到 $python_version, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车:" use_python - echo - use_python=`echo $use_python | tr 'A-Z' 'a-z'` - if [ "$use_python" == "y" ]||[ "$use_python" == "" ];then - break - elif [ "$use_python" == "n" ];then - python_root="" - checkMacPython2 - break + pip_version=`$python_root -m pip -V |awk -F '[ .]' '{print $2}'` + if [[ 9 -le ${pip_version} ]];then + : + else + red "您的pip版本过低,请安装pip 9.0.1及以上的版本" + echo + return 1 + fi + if [[ "$python_brief_version" == "" ]];then + clearMacPythonEnv + red "您的 $python_root 对应的pip存在问题,请按ctrl + c退出后重新安装pip,或切换其他python版本" + echo + return 1 + else + if [[ $python_brief_version == "27" ]];then + uncode=`python -c "import pip._internal;print(pip._internal.pep425tags.get_supported())"|grep "cp27"` + if [[ $uncode == "" ]];then + uncode="mu" else - echo "输入错误,请重新输入(y/n)" + uncode="m" fi - done - - elif [ "$python_V" == "3" ];then - python_root=`which python3` - python_version=`$python_root --version 2>&1 1>&1` - if [ $? == "0" ];then - : - else - python_version="" - fi - if [ "$python_root" == "" ]||[ "$python_root" == "/usr/bin/python" -a "$python_version" == "Python 2.7.10" ];then - checkMacPython3 - fi - while true - do - read -p " - => 在您的环境中找到 $python_version, 确认使用此版本请输入y;如您希望自定义Python路径请输入n。请在这里输入(y/n)并回车:" use_python + fi + version_list=`echo "${python_list[@]}" | grep "$python_brief_version" ` + if [[ "$version_list" != "" ]];then + return 0 + else + red "未找到可用的pip或pip3。PaddlePaddle目前支持:Python2.7/3.5/3.6/3.7及其对应的pip, 请重新输入,或使用ctrl + c退出" echo - use_python=`echo $use_python | tr 'A-Z' 'a-z'` - if [ "$use_python" == "y" ]||[ "$use_python" == "" ];then - break - elif [ "$use_python" == "n" ];then - checkMacPython3 - break - else - echo "输入错误,请重新输入(y/n)" - fi - done - else - : - fi + clearMacPythonEnv + return 1 + fi + fi + fi +} - if [ "$python_V" == "2" ]||[ "$python_V" == "3" ];then - python_brief_version=`$python_root -m pip -V |awk -F "[ |)]" '{print $6}'|sed 's#\.##g'` - if [[ $python_brief_version == "27" ]];then - uncode=`python -c "import pip._internal;print(pip._internal.pep425tags.get_supported())"|grep "cp27"` - if [[ $uncode == "" ]];then - uncode="mu" - else - uncode="m" - fi - fi - version_list=`echo "${python_list[@]}" | grep "$python_brief_version" ` - if [ "$version_list" != "" ];then - break +function checkMacPythonVersion(){ + while true + do + read -n1 -p "Step 3. 选择Python版本,请按回车键继续..." + echo + yellow " 2. 使用python 2.x" + yellow " 3. 
使用python 3.x" + read -p " => 请输入数字2或3。如输入其他字符或直接回车,将会默认使用【Python 2 】。请在这里输入并回车:" python_V + if [[ "$python_V" == "" ]];then + python_V="2" + fi + if [[ "$python_V" == "2" ]];then + initCheckMacPython2 + if [[ "$?" == "0" ]];then + checkMacPip + if [[ "$?" == "0" ]];then + return 0 + else + : + fi else - echo "未找到可用的pip或pip3。PaddlePaddle目前支持:Python2.7/3.5/3.6/3.7及其对应的pip, 请重新输入,或使用ctrl + c退出" - fi - else - echo "输入错误,请重新输入" - fi + : + fi + elif [[ "$python_V" == "3" ]];then + initCheckMacPython3 + if [[ "$?" == "0" ]];then + checkMacPip + if [[ "$?" == "0" ]];then + return 0 + else + : + fi + else + : + fi + else + red "输入错误,请重新输入" + fi done } function checkMacAVX(){ read -n1 -p "Step 4. 检测您的Mac是否支持AVX指令集,请按回车键继续..." - echo if [[ $AVX != "" ]];then AVX="avx" - echo "检测结果:支持" + echo "" + green " 检测结果:支持" + echo "" + return 0 else - read -n1 -p "检测结果:不支持。非常抱歉,PaddlePaddle在Mac系统暂不提供no_avx类型的安装包,您可以选择在Linux系统中安装no_avx版的PaddlePaddle, 请按回车键退出..." - exit + red " 检测结果:不支持。非常抱歉,PaddlePaddle在Mac系统暂不提供no_avx类型的安装包,您可以选择在Linux系统中安装no_avx版的PaddlePaddle, 请按回车键退出..." + echo + return 1 fi - echo } function checkMacGPU(){ read -n1 -p "Step 5. 选择CPU/GPU版本,请按回车键继续..." echo if [[ $GPU != "" ]];then - echo "MacOS环境下,暂未提供GPU版本的PaddlePaddle安装包,将为您安装CPU版本的PaddlePaddle" + yellow " MacOS环境下,暂未提供GPU版本的PaddlePaddle安装包,将为您安装CPU版本的PaddlePaddle" else - echo "MacOS环境下,暂未提供GPU版本的PaddlePaddle安装包,将为您安装CPU版本的PaddlePaddle" + yellow " MacOS环境下,暂未提供GPU版本的PaddlePaddle安装包,将为您安装CPU版本的PaddlePaddle" GPU=cpu fi echo @@ -822,38 +1017,44 @@ function macos() { while true do + checkMacPaddleVersion + checkMacPythonVersion + checkMacAVX + checkMacGPU - echo "*********************2. 开始安装*****************************" + green "*********************2. 开始安装*****************************" echo - read -n1 -p "即将为您下载并安装PaddlePaddle,请按回车键继续..." + yellow "即将为您下载并安装PaddlePaddle,请按回车键继续..." + read -n1 -p "" echo if [[ $paddle_version == "2" ]];then $python_root -m pip install paddlepaddle - if [ $? == "0" ];then - echo "安装成功,可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" + if [[ $? == "0" ]];then + green "安装成功,可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" break else rm $whl_cpu_release - echo "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" + red "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" echo"" echo "==========================================================================================" echo"" exit 1 fi else - if [ -f $whl_cpu_develop ];then + if [[ -f $whl_cpu_develop ]];then $python_root -m pip install $whl_cpu_develop - if [ $? == "0" ];then + if [[ $? == "0" ]];then rm -rf $whl_cpu_develop - echo "安装成功!小提示:可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" + # TODO add install success check here + green "安装成功!小提示:可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" break else - echo "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" + red "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" echo"" echo "==========================================================================================" echo"" @@ -861,15 +1062,15 @@ function macos() { fi else wget ${path}$whl_cpu_develop -O $whl_cpu_develop - if [ $? == "0" ];then + if [[ $? == "0" ]];then $python_root -m pip install $whl_cpu_develop - if [ $? == "0" ];then + if [[ $? 
== "0" ]];then rm $wheel_cpu_develop - echo "安装成功,可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" + green "安装成功,可以使用: ${python_root} 来启动安装了PaddlePaddle的Python解释器" break else rm $whl_cpu_release - echo "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" + red "未能正常安装PaddlePaddle,请尝试更换您输入的python路径,或者ctrl + c退出后请检查您使用的python对应的pip或pip源是否可用" echo"" echo "==========================================================================================" echo"" @@ -877,7 +1078,7 @@ function macos() { fi else rm $whl_cpu_develop - echo "未能正常安装PaddlePaddle,请检查您的网络 或者确认您是否安装有 wget,或者ctrl + c退出后反馈至https://github.com/PaddlePaddle/Paddle/issues" + red "未能正常安装PaddlePaddle,请检查您的网络 或者确认您是否安装有 wget,或者ctrl + c退出后反馈至https://github.com/PaddlePaddle/Paddle/issues" echo"" echo "==========================================================================================" echo"" @@ -890,33 +1091,35 @@ function macos() { function main() { echo "*********************************" - echo "欢迎使用PaddlePaddle快速安装脚本" + green "欢迎使用PaddlePaddle快速安装脚本" echo "*********************************" echo - echo "如果您在安装过程中遇到任何问题,请在https://github.com/PaddlePaddle/Paddle/issues反馈,我们的工作人员将会帮您答疑解惑" + yellow "如果您在安装过程中遇到任何问题,请在https://github.com/PaddlePaddle/Paddle/issues反馈,我们的工作人员将会帮您答疑解惑" echo - echo "本安装包将帮助您在Linux或Mac系统下安装PaddlePaddle,包括 1)安装前的准备和 2)开始安装 两部分" + echo "本安装包将帮助您在Linux或Mac系统下安装PaddlePaddle,包括" + yellow "1)安装前的准备" + yellow "2)开始安装" echo read -n1 -p "请按回车键进行下一步..." echo echo - echo "*********************1. 安装前的准备*****************************" + green "*********************1. 安装前的准备*****************************" echo echo "Step 1. 正在检测您的操作系统信息..." echo SYSTEM=`uname -s` - if [ "$SYSTEM" == "Darwin" ];then - echo "您的系统为:MAC OSX" + if [[ "$SYSTEM" == "Darwin" ]];then + yellow " 您的系统为:MAC OSX" echo macos else - echo "您的系统为:Linux" + yellow " 您的系统为:Linux" echo OS=`cat /etc/issue|awk 'NR==1 {print $1}'` - if [ $OS == "\S" ] || [ "$OS" == "CentOS" ] || [ $OS == "Ubuntu" ];then + if [[ $OS == "\S" ]] || [[ "$OS" == "CentOS" ]] || [[ $OS == "Ubuntu" ]];then linux else - echo "您的系统不在本安装包的支持范围,如您需要在windows环境下安装PaddlePaddle,请您参考PaddlePaddle官网的windows安装文档" + red "您的系统不在本安装包的支持范围,如您需要在windows环境下安装PaddlePaddle,请您参考PaddlePaddle官网的windows安装文档" fi fi } From ba38be72423eb18946cd25553680472cd4b557ac Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Wed, 20 Feb 2019 11:14:24 +0000 Subject: [PATCH 092/117] test=develop, fix protobuf runtime update and keep lib in 3.1.0 --- cmake/external/protobuf.cmake | 4 ++-- cmake/external/python.cmake | 4 ++-- python/requirements.txt | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index c2511d43e3..bc7fe5454f 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -202,7 +202,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ENDIF() SET(PROTOBUF_REPO "https://github.com/google/protobuf.git") - SET(PROTOBUF_TAG "v3.6.1") + SET(PROTOBUF_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546") ExternalProject_Add( ${TARGET_NAME} @@ -230,7 +230,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ) ENDFUNCTION() -SET(PROTOBUF_VERSION 3.6.1) +SET(PROTOBUF_VERSION 3.1.0) IF(NOT PROTOBUF_FOUND) build_protobuf(extern_protobuf FALSE) diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index 351e7fa3ce..623c53f4f7 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -74,8 +74,8 @@ IF(PYTHONINTERP_FOUND) find_python_module(wheel REQUIRED) 
find_python_module(google.protobuf REQUIRED) FIND_PACKAGE(NumPy REQUIRED) - IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.6.1") - MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.6.1, " + IF(${PY_GOOGLE.PROTOBUF_VERSION} AND ${PY_GOOGLE.PROTOBUF_VERSION} VERSION_LESS "3.0.0") + MESSAGE(FATAL_ERROR "Found Python Protobuf ${PY_GOOGLE.PROTOBUF_VERSION} < 3.0.0, " "please use pip to upgrade protobuf. pip install -U protobuf") ENDIF() ENDIF(PYTHONINTERP_FOUND) diff --git a/python/requirements.txt b/python/requirements.txt index 6cbda1db54..36bd5d4261 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,6 +1,6 @@ requests==2.9.2 numpy>=1.12 -protobuf>=3.6 +protobuf>=3.1.0 recordio>=0.1.0 matplotlib==2.2.3 # TODO: let python3 paddlepaddle package use latest matplotlib rarfile From e38dd91f0468124bb7333eb3ef97f0329c66200a Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Wed, 20 Feb 2019 19:32:59 +0800 Subject: [PATCH 093/117] Refine cmake's download function. (#15512) * Refine cmake's download function. test=develop * Set DOWNLOAD_NO_EXTRACT to 1 pure download function. test=develop * Fix unpack problem in ExternalProject_Add, and it seem DOWNLOAD_NO_EXTRACT option is not support in cmake-3.5. test=develop --- paddle/fluid/inference/tests/test.cmake | 45 +++++++++++++++++++------ 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/inference/tests/test.cmake b/paddle/fluid/inference/tests/test.cmake index 29f0f034a2..6c5fe043ff 100644 --- a/paddle/fluid/inference/tests/test.cmake +++ b/paddle/fluid/inference/tests/test.cmake @@ -1,18 +1,43 @@ +include(ExternalProject) set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com" CACHE STRING "inference download url") set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING "A path setting inference demo download directories.") -function (inference_download install_dir url filename) - message(STATUS "Download inference test stuff from ${url}/${filename}") - file(DOWNLOAD "${url}/${filename}" "${install_dir}/${filename}") - message(STATUS "finish downloading ${filename}") + +function(inference_download INSTALL_DIR URL FILENAME) + message(STATUS "Download inference test stuff from ${URL}/${FILENAME}") + string(REGEX REPLACE "[-%.]" "_" FILENAME_EX ${FILENAME}) + ExternalProject_Add( + extern_inference_download_${FILENAME_EX} + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${INSTALL_DIR} + URL ${URL}/${FILENAME} + DOWNLOAD_COMMAND wget -q -O ${INSTALL_DIR}/${FILENAME} ${URL}/${FILENAME} + DOWNLOAD_DIR ${INSTALL_DIR} + DOWNLOAD_NO_PROGRESS 1 + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND "" + ) endfunction() -function (inference_download_and_uncompress install_dir url filename) - inference_download(${install_dir} ${url} ${filename}) - execute_process( - COMMAND ${CMAKE_COMMAND} -E tar xzf ${install_dir}/${filename} - WORKING_DIRECTORY ${install_dir} - ) +function(inference_download_and_uncompress INSTALL_DIR URL FILENAME) + message(STATUS "Download inference test stuff from ${URL}/${FILENAME}") + string(REGEX REPLACE "[-%.]" "_" FILENAME_EX ${FILENAME}) + set(EXTERNAL_PROJECT_NAME "extern_inference_download_${FILENAME_EX}") + set(UNPACK_DIR "${INSTALL_DIR}/src/${EXTERNAL_PROJECT_NAME}") + ExternalProject_Add( + ${EXTERNAL_PROJECT_NAME} + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${INSTALL_DIR} + URL ${URL}/${FILENAME} + DOWNLOAD_DIR ${INSTALL_DIR} + DOWNLOAD_NO_PROGRESS 1 + CONFIGURE_COMMAND "" + 
BUILD_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND ${CMAKE_COMMAND} -E copy_directory ${UNPACK_DIR} ${INSTALL_DIR} + ) endfunction() set(WORD2VEC_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/word2vec") From 13ec2d331b3d423b541c1aa89c464429a61e2a22 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Wed, 20 Feb 2019 13:02:52 +0100 Subject: [PATCH 094/117] Enable momentum operator for a ngraph engine (#15673) * Enable momentum operator for a ngraph engine test=develop * Update tests test=develop * Unnecessary line of the code as intended was removed test=develop --- .../fluid/operators/ngraph/ngraph_bridge.cc | 1 + paddle/fluid/operators/ngraph/ngraph_ops.h | 1 + .../fluid/operators/ngraph/ops/momentum_op.h | 101 +++++++ paddle/fluid/platform/ngraph_helper.h | 7 + .../ngraph/test_cross_entropy_ngraph_op.py | 258 +----------------- .../ngraph/test_momentum_ngraph_op.py | 21 ++ 6 files changed, 133 insertions(+), 256 deletions(-) create mode 100644 paddle/fluid/operators/ngraph/ops/momentum_op.h create mode 100644 python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc index 36a2efc0ce..4bfcba6c3c 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.cc @@ -43,6 +43,7 @@ std::map +#include +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { + +void BuildMomentumNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto op_attrs = paddle::framework::AttrReader(op->Attrs()); + auto param = paddle::platform::GetInputNode(op, "Param", ngb_node_map); + auto grad = paddle::platform::GetInputNode(op, "Grad", ngb_node_map); + auto velocity = paddle::platform::GetInputNode(op, "Velocity", ngb_node_map); + auto learning_rate = + paddle::platform::GetInputNode(op, "LearningRate", ngb_node_map); + + auto mu = op_attrs.Get("mu"); + bool use_nesterov = op_attrs.Get("use_nesterov"); + + auto param_shape = param->get_shape(); + auto velocity_shape = velocity->get_shape(); + auto grad_shape = grad->get_shape(); + auto lr_shape = learning_rate->get_shape(); + + auto shape_velocity = ngraph::Shape{velocity_shape}; + auto mu_create = + ngraph::op::Constant::create(ngraph::element::f32, shape_velocity, {mu}); + + auto vel_mul = std::make_shared(velocity, mu_create); + auto vel_out = std::make_shared(vel_mul, grad); + + ngraph::NodeVector result; + if (use_nesterov) { + auto mul_res = std::make_shared(vel_out, mu_create); + auto add_res = std::make_shared(grad, mul_res); + + auto add_2d = paddle::platform::FlattenTo2d(add_res->get_shape(), 0); + auto vel_reshape = paddle::platform::NgReshaper(vel_out, add_2d); + + auto lr_bcast = std::make_shared( + learning_rate, vel_reshape->get_shape(), + ngraph::AxisSet{vel_reshape->get_shape().size() - 1}); + + auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0); + auto lr_reshape = std::make_shared( + lr_bcast, ngraph::AxisVector{0, 1}, lr_1d); + + lr_reshape = std::make_shared( + lr_reshape, ngraph::AxisVector{0}, param->get_shape()); + + auto mul_res1 = std::make_shared(add_res, lr_reshape); + auto res = std::make_shared(param, mul_res1); + paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map); + } else { + auto vel_2d = paddle::platform::FlattenTo2d(vel_out->get_shape(), 0); + auto vel_reshape = paddle::platform::NgReshaper(vel_out, 
vel_2d); + + auto lr_bcast = std::make_shared( + learning_rate, vel_reshape->get_shape(), + ngraph::AxisSet{vel_reshape->get_shape().size() - 1}); + + auto lr_1d = paddle::platform::FlattenTo1d(lr_bcast->get_shape(), 0); + auto lr_reshape = std::make_shared( + lr_bcast, ngraph::AxisVector{0, 1}, lr_1d); + + lr_reshape = std::make_shared( + lr_reshape, ngraph::AxisVector{0}, param->get_shape()); + + auto mul_result = + std::make_shared(lr_reshape, vel_out); + + auto res = std::make_shared(param, mul_result); + paddle::platform::SetOutputNode(op, "ParamOut", res, ngb_node_map); + } + paddle::platform::SetOutputNode(op, "VelocityOut", vel_out, ngb_node_map); +} + +} // namespace ngraphs +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/platform/ngraph_helper.h b/paddle/fluid/platform/ngraph_helper.h index 5ee985ea71..e74f57a79a 100644 --- a/paddle/fluid/platform/ngraph_helper.h +++ b/paddle/fluid/platform/ngraph_helper.h @@ -43,6 +43,13 @@ std::shared_ptr Nchw2Nhwc(std::shared_ptr in) { return std::make_shared(in, axis_vec, in_shape); } +ngraph::Shape FlattenTo1d(ngraph::Shape sh, int num) { + auto x1 = std::accumulate(std::begin(sh), std::end(sh) + num, 1, + std::multiplies()); + size_t x1_l = (size_t)x1; + return ngraph::Shape{x1_l}; +} + ngraph::Shape FlattenTo2d(ngraph::Shape sh, int num) { auto x1 = std::accumulate(std::begin(sh), std::begin(sh) + num, 1, std::multiplies()); diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py index 9a185eb97c..3057218a1d 100644 --- a/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py +++ b/python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,261 +15,7 @@ from __future__ import print_function import unittest -import numpy as np -import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, randomize_probability - - -class TestCrossEntropyOp(OpTest): - """Test cross-entropy with discrete one-hot labels. 
- """ - - def setUp(self): - self.op_type = "cross_entropy" - self.soft_label = False - self.ignore_index = -100 - self.dtype = np.float64 - self.batch_size = 30 - self.class_num = 10 - self._cpu_only = True - - self.init_dtype_type() - self.init_attr_type() - self.init_bs_class_num() - self.init_x() - self.init_label() - self.get_cross_entropy() - - self.inputs = {"X": self.x, "Label": self.label} - self.outputs = {"Y": self.cross_entropy} - self.attrs = { - "soft_label": self.soft_label, - "ignore_index": self.ignore_index - } - - def init_x(self): - self.x = randomize_probability( - self.batch_size, self.class_num, dtype=self.dtype) - - def init_label(self): - self.label = np.random.randint( - 0, self.class_num, (self.batch_size, 1), dtype="int64") - - def get_cross_entropy(self): - self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label[i][0]])] - for i in range(self.x.shape[0])], - dtype="float64") - - def init_attr_type(self): - pass - - def init_dtype_type(self): - pass - - def init_bs_class_num(self): - pass - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Y", numeric_grad_delta=0.001) - - -class TestCrossEntropyOp2(TestCrossEntropyOp): - """Test cross-entropy with vectorized soft labels. - """ - - def init_label(self): - self.label = np.random.uniform( - 0.1, 1.0, [self.batch_size, self.class_num]).astype(self.dtype) - self.label /= self.label.sum(axis=1, keepdims=True) - - def get_cross_entropy(self): - self.cross_entropy = (-self.label * np.log(self.x)).sum( - axis=1, keepdims=True).astype(self.dtype) - - def init_attr_type(self): - self.soft_label = True - - def init_dtype_type(self): - self.dtype = np.float32 - - def init_bs_class_num(self): - self.batch_size = 5 - self.class_num = 37 - - def test_check_grad(self): - self.check_grad( - ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) - - -class TestCrossEntropyOp3(TestCrossEntropyOp): - """Test cross-entropy with vectorized one-hot representation of labels. - """ - - def init_label(self): - self.label_index = np.random.randint(0, self.class_num, - (self.batch_size)) - self.label = np.zeros(self.x.shape).astype(self.dtype) - self.label[np.arange(self.batch_size), self.label_index] = 1 - - def get_cross_entropy(self): - self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label_index[i]])] - for i in range(self.x.shape[0])]).astype(self.dtype) - - def init_attr_type(self): - self.soft_label = True - - def init_dtype_type(self): - self.dtype = np.float32 - - def init_bs_class_num(self): - self.batch_size = 5 - self.class_num = 17 - - def test_check_grad(self): - self.check_grad( - ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) - - -class TestCrossEntropyOp4(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with discrete one-hot labels. 
- """ - - def init_x(self): - self.shape = [10, 2, 4] - self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) - self.x = self.X_2d.reshape(self.shape + [self.class_num]) - - def init_label(self): - self.label_2d = np.random.randint( - 0, self.class_num, (self.ins_num, 1), dtype="int64") - self.label = self.label_2d.reshape(self.shape + [1]) - - def get_cross_entropy(self): - cross_entropy_2d = np.asmatrix( - [[-np.log(self.X_2d[i][self.label_2d[i][0]])] - for i in range(self.X_2d.shape[0])]).astype(self.dtype) - self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + - [1]) - - def init_attr_type(self): - self.soft_label = False - - def init_dtype_type(self): - self.dtype = np.float64 - - def init_bs_class_num(self): - self.class_num = 10 - - -class TestCrossEntropyOp5(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with vectorized soft labels. - """ - - def init_x(self): - self.shape = [4, 3] - self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) - self.x = self.X_2d.reshape(self.shape + [self.class_num]) - - def init_label(self): - self.label_2d = np.random.uniform( - 0.1, 1.0, [self.ins_num, self.class_num]).astype(self.dtype) - self.label_2d /= self.label_2d.sum(axis=1, keepdims=True) - self.label = self.label_2d.reshape(self.shape + [self.class_num]) - - def get_cross_entropy(self): - cross_entropy_2d = (-self.label_2d * np.log(self.X_2d)).sum( - axis=1, keepdims=True).astype(self.dtype) - self.cross_entropy = np.array(cross_entropy_2d).reshape(self.shape + - [1]) - - def init_attr_type(self): - self.soft_label = True - - def init_dtype_type(self): - self.dtype = np.float32 - - def init_bs_class_num(self): - self.class_num = 37 - - def test_check_grad(self): - self.check_grad( - ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) - - -class TestCrossEntropyOp6(TestCrossEntropyOp): - """Test high rank tensor cross-entropy with vectorized one-hot representation of labels. - """ - - def init_x(self): - self.shape = [4, 3, 2] - self.ins_num = np.prod(np.array(self.shape)) - self.X_2d = randomize_probability(self.ins_num, - self.class_num).astype(self.dtype) - self.x = self.X_2d.reshape(self.shape + [self.class_num]) - - def init_label(self): - self.label_index_2d = np.random.randint( - 0, self.class_num, (self.ins_num), dtype="int64") - label_2d = np.zeros(self.X_2d.shape) - label_2d[np.arange(self.ins_num), self.label_index_2d] = 1 - self.label = label_2d.reshape(self.shape + [self.class_num]).astype( - self.dtype) - - def get_cross_entropy(self): - cross_entropy_2d = np.asmatrix( - [[-np.log(self.X_2d[i][self.label_index_2d[i]])] - for i in range(self.X_2d.shape[0])]) - self.cross_entropy = np.array(cross_entropy_2d).reshape( - self.shape + [1]).astype(self.dtype) - - def init_attr_type(self): - self.soft_label = True - - def init_dtype_type(self): - self.dtype = np.float32 - - def init_bs_class_num(self): - self.class_num = 17 - - def test_check_grad(self): - self.check_grad( - ["X"], "Y", max_relative_error=0.05, numeric_grad_delta=0.001) - - -class TestCrossEntropyOp7(TestCrossEntropyOp): - """Test cross-entropy with ignore index. 
- """ - - def init_label(self): - self.label = np.random.randint( - 0, self.class_num, (self.batch_size, 1), dtype="int64") - - def get_cross_entropy(self): - self.cross_entropy = np.asmatrix( - [[-np.log(self.x[i][self.label[i][0]])] - if self.label[i][0] != self.ignore_index else [0] - for i in range(self.x.shape[0])]).astype(self.dtype) - - def init_attr_type(self): - self.soft_label = False - self.ignore_index = 3 - - def init_dtype_type(self): - self.dtype = np.float64 - - def init_bs_class_num(self): - self.batch_size = 30 - self.class_num = 10 - +from paddle.fluid.tests.unittests.test_cross_entropy_op import TestCrossEntropyOp, TestCrossEntropyOp2, TestCrossEntropyOp3, TestCrossEntropyOp4, TestCrossEntropyOp5, TestCrossEntropyOp6, TestCrossEntropyOp7 if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py new file mode 100644 index 0000000000..2c3549d907 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/ngraph/test_momentum_ngraph_op.py @@ -0,0 +1,21 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +from paddle.fluid.tests.unittests.test_momentum_op import TestMomentumOp1, TestMomentumOp2, TestLarsMomentumOp, TestSparseMomentumOp, TestSparseMomentumOp2 + +if __name__ == '__main__': + unittest.main() From fbb5404652e3cc4f7ba7fc0a6e92a3539243566d Mon Sep 17 00:00:00 2001 From: chengduo Date: Wed, 20 Feb 2019 08:52:47 -0600 Subject: [PATCH 095/117] fix test_parallel_executor_seresnex timeout (#15812) test=develop --- python/paddle/fluid/tests/unittests/CMakeLists.txt | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 289a48aac9..a1cf5fad13 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -113,12 +113,11 @@ py_test_modules(test_ir_memory_optimize_transformer MODULES test_ir_memory_optim endif() if(NOT APPLE) py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL) - if(CMAKE_BUILD_TYPE STREQUAL "Debug") - # change the timeout from 600 to 1200, because in debug mode, this test need more time. - set_tests_properties(test_image_classification_resnet PROPERTIES TIMEOUT 1200) - endif() endif() - +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + # change the timeout from 600 to 1200, because in debug mode, this test need more time. 
+ set_tests_properties(test_parallel_executor_seresnext PROPERTIES TIMEOUT 1200) +endif() if (WITH_NGRAPH) add_subdirectory(ngraph) From 971f3bc9b0823c921a4c8e31cef5e6e9797462d5 Mon Sep 17 00:00:00 2001 From: tangwei12 Date: Wed, 20 Feb 2019 23:59:14 +0800 Subject: [PATCH 096/117] fix params with only 1 dim (#15828) * fix params with only 1 dim * test=develop --- python/paddle/fluid/io.py | 5 ++++- python/paddle/fluid/transpiler/distribute_transpiler.py | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index a2abbf36c0..24e102b6c2 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -766,7 +766,10 @@ def _load_distributed_persistables(executor, dirname, main_program=None): dtype=slice_var.dtype, persistable=True) - dim1_flatten = reduce(lambda x, y: x * y, slice.shape[1:]) + dim1_flatten = 1 + if len(slice.shape) >= 2: + dim1_flatten = reduce(lambda x, y: x * y, slice.shape[1:]) + start = int(offset / dim1_flatten) end = int(offset / dim1_flatten + slice.shape[0]) diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index a3293afbbd..eb54068650 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -1020,7 +1020,11 @@ class DistributeTranspiler(object): skip_dim0 = 0 slice_vars = self.param_var_mapping[orig_var_name] - orig_dim1_flatten = reduce(lambda x, y: x * y, slice_vars[0].shape[1:]) + orig_dim1_flatten = 1 + + if len(slice_vars[0].shape) >= 2: + orig_dim1_flatten = reduce(lambda x, y: x * y, + slice_vars[0].shape[1:]) for slice_var in slice_vars[:block_idx]: skip_dim0 += slice_var.shape[0] From 46fcadec185a9c4347004a4c093dbf8a36005eb2 Mon Sep 17 00:00:00 2001 From: xuezhong Date: Wed, 20 Feb 2019 17:00:48 +0000 Subject: [PATCH 097/117] add parameter description test=develop --- python/paddle/fluid/optimizer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 4fb570d957..cb799b6396 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -649,6 +649,7 @@ class AdagradOptimizer(Optimizer): regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer. name: A optional name prefix. + initial_accumulator_value (float): Initial value for moment accumulator. Examples: .. 
code-block:: python From a83e4704056c48c7afa457ec5c7b2f6926a8c102 Mon Sep 17 00:00:00 2001 From: Dun Date: Thu, 21 Feb 2019 12:52:47 +0800 Subject: [PATCH 098/117] Profiler refine and add CUDA runtime api tracer (#15301) * refine profiler && add runtime tracer * test=develop * test=develop * test=develop * test=develop * test=develop * test=develop * test=develop * test=develop * fix bug && test=develop * add thread id map && test=develop * test=develop * testing * bug fix * remove cuda event && refine code && test=develop * test=develop * test=develop * test=develop * fix windows temp file && test=develop * test=develop * fix windows bug && test=develop * fix start up issue && test=develop * code polish && test=develop * remove unused code && test=develop * add some cupti cbid && test=develop * add FLAGS_multiple_of_cupti_buffer_size && test=develop * fix compile error && test=develop * add keyword && test=develop * fix && test=develop * code polish && test=develop --- .../framework/details/all_reduce_op_handle.cc | 2 +- .../framework/details/broadcast_op_handle.cc | 2 +- .../details/fused_broadcast_op_handle.cc | 2 +- .../framework/details/reduce_op_handle.cc | 2 +- .../scope_buffered_ssa_graph_executor.cc | 2 +- .../details/threaded_ssa_graph_executor.cc | 2 +- paddle/fluid/framework/operator.cc | 4 +- paddle/fluid/inference/tests/test_helper.h | 8 +- .../operators/distributed/brpc/brpc_client.cc | 10 +- .../operators/distributed/grpc/grpc_client.cc | 16 +- .../operators/distributed/grpc/grpc_serde.cc | 4 +- paddle/fluid/operators/reader/read_op.cc | 4 +- paddle/fluid/platform/CMakeLists.txt | 6 +- paddle/fluid/platform/device_tracer.cc | 365 ++++++++++++++---- paddle/fluid/platform/device_tracer.h | 20 +- paddle/fluid/platform/init.cc | 29 ++ paddle/fluid/platform/profiler.cc | 125 +++--- paddle/fluid/platform/profiler.cu | 50 +++ paddle/fluid/platform/profiler.h | 36 +- paddle/fluid/platform/profiler.proto | 1 + paddle/fluid/platform/profiler_test.cc | 55 +-- python/paddle/fluid/__init__.py | 3 +- .../fluid/tests/unittests/test_profiler.py | 36 +- tools/timeline.py | 16 +- 24 files changed, 556 insertions(+), 244 deletions(-) create mode 100644 paddle/fluid/platform/profiler.cu diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc index dd77f7099f..c1f9c2b60c 100644 --- a/paddle/fluid/framework/details/all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -53,7 +53,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, #endif void AllReduceOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second); + platform::RecordEvent record_event(Name()); WaitInputVarGenerated(); auto in_var_handles = DynamicCast(this->Inputs()); diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index c42a691be2..fdff83b928 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -22,7 +22,7 @@ namespace framework { namespace details { void BroadcastOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second); + platform::RecordEvent record_event(Name()); if (places_.size() == 1) return; diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle.cc b/paddle/fluid/framework/details/fused_broadcast_op_handle.cc index 51dfa2d071..f48561ea32 100644 --- 
a/paddle/fluid/framework/details/fused_broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/fused_broadcast_op_handle.cc @@ -22,7 +22,7 @@ namespace framework { namespace details { void FusedBroadcastOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second); + platform::RecordEvent record_event(Name()); if (places_.size() == 1UL) return; diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index ae76fad450..4e2477c205 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -139,7 +139,7 @@ void ReduceOpHandle::GatherSelectedRows( #endif void ReduceOpHandle::RunImpl() { - platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second); + platform::RecordEvent record_event(Name()); if (places_.size() == 1) return; // the input and output may have dummy var. diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc index 91e4f9adb4..7b13112986 100644 --- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -63,7 +63,7 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run( eptr = std::current_exception(); } - platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr); + platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun"); ++drop_scope_counter_; bool stream_end = false; diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 677a293794..50bab832c2 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -37,7 +37,7 @@ ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( FeedFetchList ThreadedSSAGraphExecutor::Run( const std::vector &fetch_tensors) { std::unique_ptr event( - new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr)); + new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare")); std::unordered_map pending_ops; std::unordered_set pending_vars; auto ready_vars = std::make_shared>(); diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index e15c838f4f..9a0348871b 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -177,9 +177,7 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { // in concurrency scenerio. Here use an `if` to fix this issue. // Please not remove the `if`, ask @Superjomn if there are any concern. 
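  // A minimal usage sketch of the scoped guard, assuming the one-argument
  // constructor that this patch switches every call site to (the
  // DeviceContext argument is dropped, so callers only pass an event name):
  //
  //   {
  //     platform::RecordEvent guard("forward");  // starts timing the named event
  //     op->Run(scope, place);                   // region being measured
  //   }                                          // guard's destructor ends the event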
if (platform::IsProfileEnabled()) { - platform::DeviceContextPool& pool = - platform::DeviceContextPool::Instance(); - platform::RecordEvent record_event(Type(), pool.Get(place)); + platform::RecordEvent record_event(Type()); RunImpl(scope, place); } else { RunImpl(scope, place); diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h index 75fa611c0d..861f69f4d2 100644 --- a/paddle/fluid/inference/tests/test_helper.h +++ b/paddle/fluid/inference/tests/test_helper.h @@ -171,9 +171,7 @@ void TestInference(const std::string& dirname, // Enable the profiler paddle::platform::EnableProfiler(state); { - paddle::platform::RecordEvent record_event( - "init_program", - paddle::platform::DeviceContextPool::Instance().Get(place)); + paddle::platform::RecordEvent record_event("init_program"); inference_program = InitProgram(&executor, scope, dirname, is_combined); } @@ -230,9 +228,7 @@ void TestInference(const std::string& dirname, // Run repeat times to profile the performance for (int i = 0; i < repeat; ++i) { - paddle::platform::RecordEvent record_event( - "run_inference", - paddle::platform::DeviceContextPool::Instance().Get(place)); + paddle::platform::RecordEvent record_event("run_inference"); if (PrepareContext) { // Note: if you change the inference_program, you need to call diff --git a/paddle/fluid/operators/distributed/brpc/brpc_client.cc b/paddle/fluid/operators/distributed/brpc/brpc_client.cc index b8e63f42e2..a1a3443348 100644 --- a/paddle/fluid/operators/distributed/brpc/brpc_client.cc +++ b/paddle/fluid/operators/distributed/brpc/brpc_client.cc @@ -80,7 +80,7 @@ VarHandlePtr BRPCClient::AsyncSendVar(const std::string& ep, google::protobuf::Closure* done = brpc::NewCallback( &HandleSendResponse, cntl, response, var_h, ch_ptr, ch_ctx, this); - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); ch_ctx->stub->SendVariable(cntl, &request, response, done); @@ -184,7 +184,7 @@ VarHandlePtr BRPCClient::_AsyncGetVar(const std::string& ep, google::protobuf::Closure* done = brpc::NewCallback( &HandleGetResponse, cntl, response, var_h, ch_ptr, ch_ctx, this); - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); if (method_name == kGetMonomerRPC) { ch_ctx->stub->GetMonomerVariable(cntl, &req, response, done); @@ -272,7 +272,7 @@ VarHandlePtr BRPCClient::AsyncPrefetchVar(const std::string& ep, &cntl->request_attachment(), out_var_name_val, false, 0, table_name_val); - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); google::protobuf::Closure* done = brpc::NewCallback( &HandleGetResponse, cntl, response, var_h, ch_ptr, ch_ctx, this); @@ -311,7 +311,7 @@ VarHandlePtr BRPCClient::AsyncSendFetchBarrier(const std::string& ep, VarHandlePtr var_h( new VarHandle(ep, method, FETCH_BARRIER_MESSAGE, nullptr, nullptr)); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); google::protobuf::Closure* done = brpc::NewCallback( &HandleFetchBarrierResponse, cntl, response, var_h, ch_ptr, ch_ctx, this); @@ -406,7 +406,7 @@ VarHandlePtr BRPCClient::AsyncSendVarMessage( sendrecv::VoidMessage* response = new sendrecv::VoidMessage(); cntl->set_timeout_ms(time_out); - platform::RecordRPCEvent record_event(method_name, nullptr); + platform::RecordRPCEvent record_event(method_name); VarHandlePtr var_h( new VarHandle(ep, method_name, req.varname(), nullptr, nullptr)); diff 
--git a/paddle/fluid/operators/distributed/grpc/grpc_client.cc b/paddle/fluid/operators/distributed/grpc/grpc_client.cc index 52310f8d04..61e94dae3c 100644 --- a/paddle/fluid/operators/distributed/grpc/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc/grpc_client.cc @@ -89,7 +89,7 @@ VarHandlePtr GRPCClient::AsyncSendVar(const std::string& ep, // stub context s->response_call_back_ = nullptr; - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_); @@ -184,7 +184,7 @@ VarHandlePtr GRPCClient::_AsyncGetVar( // stub context s->response_call_back_ = ProcGetResponse; - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); auto call = s->stub_g_.PrepareUnaryCall(s->context_.get(), rpc_path, buf, &cq_); @@ -235,7 +235,7 @@ VarHandlePtr GRPCClient::AsyncPrefetchVar(const std::string& ep, // stub context s->response_call_back_ = ProcGetResponse; - platform::RecordRPCEvent record_event(method, p_ctx); + platform::RecordRPCEvent record_event(method); auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/PrefetchVariable", req, @@ -265,7 +265,7 @@ VarHandlePtr GRPCClient::AsyncSendBatchBarrier(const std::string& ep, sendrecv::VariableMessage req; req.set_varname(BATCH_BARRIER_MESSAGE); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); @@ -290,7 +290,7 @@ VarHandlePtr GRPCClient::AsyncSendFetchBarrier(const std::string& ep, sendrecv::VariableMessage req; req.set_varname(FETCH_BARRIER_MESSAGE); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); @@ -317,7 +317,7 @@ VarHandlePtr GRPCClient::AsyncGetMonomerBarrier(const std::string& ep, sendrecv::VariableMessage req; req.set_varname(var_name); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); auto rpc = s->stub_->AsyncGetMonomerBarrier(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); @@ -342,7 +342,7 @@ VarHandlePtr GRPCClient::AsyncSendComplete(const std::string& ep, sendrecv::VariableMessage req; req.set_varname(COMPLETE_MESSAGE); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); @@ -372,7 +372,7 @@ VarHandlePtr GRPCClient::AsyncCheckpointNotify(const std::string& ep, req.set_varname(CHECKPOINT_SAVE_MESSAGE); req.set_out_varname(dir); - platform::RecordRPCEvent record_event(method, nullptr); + platform::RecordRPCEvent record_event(method); auto rpc = s->stub_->AsyncCheckpointNotify(s->context_.get(), req, &cq_); rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); diff --git a/paddle/fluid/operators/distributed/grpc/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc/grpc_serde.cc index 6df4fd36f9..6e65aa5fae 100644 --- a/paddle/fluid/operators/distributed/grpc/grpc_serde.cc +++ b/paddle/fluid/operators/distributed/grpc/grpc_serde.cc @@ -38,7 +38,7 @@ 
void SerializeToByteBuffer(const std::string& name, framework::Variable* var, ::grpc::ByteBuffer* msg, const std::string& out_name, const int trainer_id, const std::string& table_name) { - platform::RecordRPCEvent record_event("serial", &ctx); + platform::RecordRPCEvent record_event("serial"); VarMsg request; TensorPayload* payload = nullptr; @@ -147,7 +147,7 @@ void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, const platform::DeviceContext& ctx, const framework::Scope* scope, framework::Variable** var, int* trainer_id) { - platform::RecordRPCEvent record_event("deserial", &ctx); + platform::RecordRPCEvent record_event("deserial"); operators::distributed::GRPCVariableResponse resp(scope, &ctx); PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); *var = resp.GetVar(); diff --git a/paddle/fluid/operators/reader/read_op.cc b/paddle/fluid/operators/reader/read_op.cc index 8fe638ac2f..846b2ed77e 100644 --- a/paddle/fluid/operators/reader/read_op.cc +++ b/paddle/fluid/operators/reader/read_op.cc @@ -85,9 +85,7 @@ class ReadOp : public framework::OperatorBase { std::vector ins; // For profiling - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& ctx = *pool.Get(dev_place); - platform::RecordEvent record_event(Type(), &ctx); + platform::RecordEvent record_event(Type()); reader->ReadNext(&ins); if (ins.empty()) { diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 424b8f0542..5833fee35b 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -88,7 +88,11 @@ cc_library(timer SRCS timer.cc) cc_test(timer_test SRCS timer_test.cc DEPS timer) cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS}) -cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) +if(WITH_GPU) + nv_library(profiler SRCS profiler.cc profiler.cu DEPS device_context device_tracer) +else() + cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) +endif() cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) nv_test(float16_gpu_test SRCS float16_test.cu DEPS lod_tensor) diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index 0a4563ead6..f42212d095 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -14,17 +14,23 @@ limitations under the License. */ #include "paddle/fluid/platform/device_tracer.h" #include +#include #include +#include #include #include // NOLINT #include +#include #include #include // NOLINT +#include +#include #include #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/string/printf.h" namespace paddle { @@ -33,17 +39,31 @@ namespace { // Tracking the nested block stacks of each thread. thread_local std::deque block_id_stack; // Tracking the nested event stacks. -thread_local std::deque annotation_stack; +thread_local std::deque annotation_stack; + +std::map system_thread_id_map; std::once_flag tracer_once_flag; DeviceTracer *tracer = nullptr; + +void PrintCuptiHint() { + static bool showed = false; + if (showed) return; + showed = true; + LOG(WARNING) << "Invalid timestamp occured. Please try increasing the " + "FLAGS_multiple_of_cupti_buffer_size."; +} + } // namespace #ifdef PADDLE_WITH_CUPTI namespace { -// TODO(panyx0718): Revisit the buffer size here. 
-uint64_t kBufSize = 32 * 1024; +// The experimental best performance is +// the same size with CUPTI device buffer size(8M) +uint64_t kBufSize = 1024 * 1024 * 8; uint64_t kAlignSize = 8; +std::unordered_map runtime_cbid_str, + driver_cbid_str; #define ALIGN_BUFFER(buffer, align) \ (((uintptr_t)(buffer) & ((align)-1)) \ @@ -92,15 +112,33 @@ std::string MemcpyKind(CUpti_ActivityMemcpyKind kind) { return "MEMCPY"; } +std::string DriverKind(CUpti_CallbackId cbid) { + auto iter = driver_cbid_str.find(cbid); + if (iter == driver_cbid_str.end()) + return "Driver API " + std::to_string(cbid); + return iter->second; +} + +std::string RuntimeKind(CUpti_CallbackId cbid) { + auto iter = runtime_cbid_str.find(cbid); + if (iter == runtime_cbid_str.end()) + return "Runtime API " + std::to_string(cbid); + return iter->second; +} + void EnableActivity() { // Device activity record is created when CUDA initializes, so we // want to enable it before cuInit() or any CUDA runtime call. CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY)); - CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)); - CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DEVICE)); - CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET)); - CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_OVERHEAD)); + CUPTI_CALL( + dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL)); + // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL)); + CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DRIVER)); + CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_RUNTIME)); // We don't track these activities for now. + // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_MEMSET)); + // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_OVERHEAD)); + // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DEVICE)); // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_CONTEXT)); // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_DRIVER)); // CUPTI_CALL(dynload::cuptiActivityEnable(CUPTI_ACTIVITY_KIND_RUNTIME)); @@ -110,16 +148,17 @@ void EnableActivity() { void DisableActivity() { CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_KERNEL)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DEVICE)); + CUPTI_CALL( + dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DEVICE)); // Disable all other activity record kinds. 
- CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONTEXT)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONTEXT)); CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DRIVER)); CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_RUNTIME)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMSET)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_NAME)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MARKER)); - CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_OVERHEAD)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMSET)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_NAME)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MARKER)); + // CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_OVERHEAD)); } void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, @@ -132,6 +171,11 @@ void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size, void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize) { + static std::thread::id cupti_thread_id(0); + if (cupti_thread_id == std::thread::id(0)) + cupti_thread_id = std::this_thread::get_id(); + PADDLE_ENFORCE_EQ(std::this_thread::get_id(), cupti_thread_id, + "Only one thread is allowed to call bufferCompleted()"); CUptiResult status; CUpti_Activity *record = NULL; if (validSize > 0) { @@ -168,6 +212,23 @@ void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, memcpy->correlationId, memcpy->bytes); break; } + case CUPTI_ACTIVITY_KIND_DRIVER: { + auto *api = reinterpret_cast(record); + if (api->start != 0 && api->end != 0) + // -1 device id represents CUDA api call + tracer->AddCPURecords( + DriverKind(api->cbid), api->start, api->end, -1, + GetThreadIdFromSystemThreadId(api->threadId)); + break; + } + case CUPTI_ACTIVITY_KIND_RUNTIME: { + auto *api = reinterpret_cast(record); + if (api->start != 0 && api->end != 0) + tracer->AddCPURecords( + RuntimeKind(api->cbid), api->start, api->end, -1, + GetThreadIdFromSystemThreadId(api->threadId)); + break; + } default: { break; } } } else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) { @@ -183,21 +244,35 @@ void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, dynload::cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped)); if (dropped != 0) { fprintf(stderr, "Dropped %u activity records\n", (unsigned int)dropped); + PrintCuptiHint(); } } free(buffer); } + +void initCuptiCbidStr(); + } // namespace #endif // PADDLE_WITH_CUPTI class DeviceTracerImpl : public DeviceTracer { public: - DeviceTracerImpl() : enabled_(false) {} + DeviceTracerImpl() : enabled_(false) { +#ifdef PADDLE_WITH_CUPTI + initCuptiCbidStr(); +#endif + } - void AddAnnotation(uint64_t id, const std::string &anno) { - std::lock_guard l(trace_mu_); - correlations_[id] = anno; + void AddAnnotation(uint32_t id, Event *event) { + thread_local std::forward_list> + *local_correlations_pairs = nullptr; + if (local_correlations_pairs == nullptr) { + std::lock_guard l(trace_mu_); + correlations_pairs.emplace_front(); + local_correlations_pairs = &correlations_pairs.front(); + } + local_correlations_pairs->push_front(std::make_pair(id, event)); } void AddCPURecords(const std::string &anno, uint64_t start_ns, @@ -206,8 +281,13 @@ class DeviceTracerImpl : public DeviceTracer { VLOG(1) << "Empty timeline annotation."; return; } - std::lock_guard l(trace_mu_); - 
cpu_records_.push_back( + thread_local std::forward_list *local_cpu_records_ = nullptr; + if (local_cpu_records_ == nullptr) { + std::lock_guard l(trace_mu_); + cpu_records_.emplace_front(); + local_cpu_records_ = &cpu_records_.front(); + } + local_cpu_records_->push_front( CPURecord{anno, start_ns, end_ns, device_id, thread_id}); } @@ -215,25 +295,27 @@ class DeviceTracerImpl : public DeviceTracer { uint64_t end_ns, int64_t device_id, int64_t stream_id, uint32_t correlation_id, uint64_t bytes) { // 0 means timestamp information could not be collected for the kernel. - if (start_ns == 0 || end_ns == 0) { + if (start_ns == 0 || end_ns == 0 || start_ns == end_ns) { VLOG(3) << name << " cannot be traced"; + PrintCuptiHint(); return; } - std::lock_guard l(trace_mu_); - mem_records_.push_back(MemRecord{name, start_ns, end_ns, device_id, - stream_id, correlation_id, bytes}); + // NOTE(liangdun): lock is not needed, only one thread call this function. + mem_records_.push_front(MemRecord{name, start_ns, end_ns, device_id, + stream_id, correlation_id, bytes}); } void AddKernelRecords(std::string name, uint64_t start, uint64_t end, int64_t device_id, int64_t stream_id, uint32_t correlation_id) { // 0 means timestamp information could not be collected for the kernel. - if (start == 0 || end == 0) { + if (start == 0 || end == 0 || start == end) { VLOG(3) << correlation_id << " cannot be traced"; + PrintCuptiHint(); return; } - std::lock_guard l(trace_mu_); - kernel_records_.push_back( + // NOTE(liangdun): lock is not needed, only one thread call this function. + kernel_records_.push_front( KernelRecord{name, start, end, device_id, stream_id, correlation_id}); } @@ -263,25 +345,80 @@ class DeviceTracerImpl : public DeviceTracer { } else if (ret != CUPTI_SUCCESS) { fprintf(stderr, "Failed to create CUPTI subscriber.\n"); } - CUPTI_CALL( - dynload::cuptiEnableCallback(1, subscriber_, CUPTI_CB_DOMAIN_DRIVER_API, - CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel)); + const std::vector cbids { + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpy_v3020, + CUPTI_RUNTIME_TRACE_CBID_cudaMemcpyAsync_v3020, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchKernel_v7000 +#if CUDA_VERSION >= 9000 + , + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernel_v9000, + CUPTI_RUNTIME_TRACE_CBID_cudaLaunchCooperativeKernelMultiDevice_v9000 +#endif + }; + for (auto cbid : cbids) + CUPTI_CALL(dynload::cuptiEnableCallback( + 1, subscriber_, CUPTI_CB_DOMAIN_RUNTIME_API, cbid)); CUPTI_CALL(dynload::cuptiGetTimestamp(&start_ns_)); #endif // PADDLE_WITH_CUPTI enabled_ = true; } + void Reset() { +#ifdef PADDLE_WITH_CUPTI + CUPTI_CALL( + dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED)); +#endif + std::lock_guard l(trace_mu_); + kernel_records_.clear(); + mem_records_.clear(); + correlations_.clear(); + for (auto &tmp : correlations_pairs) tmp.clear(); + for (auto &tmp : cpu_records_) tmp.clear(); + } + + void GenEventKernelCudaElapsedTime() { +#ifdef PADDLE_WITH_CUPTI + if (correlations_.empty()) + for (auto &tmp : correlations_pairs) + for (auto &pair : tmp) correlations_[pair.first] = pair.second; + for (const KernelRecord &r : kernel_records_) { + auto c = correlations_.find(r.correlation_id); + if (c != correlations_.end() && c->second != nullptr) { + Event *e = c->second; + e->AddCudaElapsedTime(r.start_ns, r.end_ns); + } + } + for (const auto &r : mem_records_) { + auto c = correlations_.find(r.correlation_id); + if (c != correlations_.end() && c->second != nullptr) { + Event *e = c->second; + 
e->AddCudaElapsedTime(r.start_ns, r.end_ns); + } + } +#endif + } + proto::Profile GenProfile(const std::string &profile_path) { + int miss = 0, find = 0; std::lock_guard l(trace_mu_); proto::Profile profile_pb; profile_pb.set_start_ns(start_ns_); profile_pb.set_end_ns(end_ns_); + if (correlations_.empty()) + for (auto &tmp : correlations_pairs) + for (auto &pair : tmp) correlations_[pair.first] = pair.second; for (const KernelRecord &r : kernel_records_) { auto *event = profile_pb.add_events(); event->set_type(proto::Event::GPUKernel); - if (correlations_.find(r.correlation_id) != correlations_.end()) { - event->set_name(correlations_.at(r.correlation_id)); + auto c = correlations_.find(r.correlation_id); + if (c != correlations_.end() && c->second != nullptr) { + event->set_name(c->second->name()); + event->set_detail_info(r.name); + find++; } else { + VLOG(10) << "Missing Kernel Event: " + r.name; + miss++; event->set_name(r.name); } event->set_start_ns(r.start_ns); @@ -289,31 +426,41 @@ class DeviceTracerImpl : public DeviceTracer { event->set_sub_device_id(r.stream_id); event->set_device_id(r.device_id); } - - for (const CPURecord &r : cpu_records_) { - auto *event = profile_pb.add_events(); - event->set_type(proto::Event::CPU); - event->set_name(r.name); - event->set_start_ns(r.start_ns); - event->set_end_ns(r.end_ns); - event->set_sub_device_id(r.thread_id); - event->set_device_id(r.device_id); - } + VLOG(1) << "KernelRecord event miss: " << miss << " find: " << find; + for (auto &tmp : cpu_records_) + for (const CPURecord &r : tmp) { + auto *event = profile_pb.add_events(); + event->set_type(proto::Event::CPU); + event->set_name(r.name); + event->set_start_ns(r.start_ns); + event->set_end_ns(r.end_ns); + event->set_sub_device_id(r.thread_id); + event->set_device_id(r.device_id); + } + miss = find = 0; for (const MemRecord &r : mem_records_) { auto *event = profile_pb.add_events(); event->set_type(proto::Event::GPUKernel); - event->set_name(r.name); + auto c = correlations_.find(r.correlation_id); + if (c != correlations_.end() && c->second != nullptr) { + event->set_name(c->second->name()); + event->set_detail_info(r.name); + find++; + } else { + miss++; + event->set_name(r.name); + } event->set_start_ns(r.start_ns); event->set_end_ns(r.end_ns); event->set_sub_device_id(r.stream_id); event->set_device_id(r.device_id); event->mutable_memcopy()->set_bytes(r.bytes); } + VLOG(1) << "MemRecord event miss: " << miss << " find: " << find; std::ofstream profile_f; - profile_f.open(profile_path, std::ios::out | std::ios::trunc); - std::string profile_str; - profile_pb.SerializeToString(&profile_str); - profile_f << profile_str; + profile_f.open(profile_path, + std::ios::out | std::ios::trunc | std::ios::binary); + profile_pb.SerializeToOstream(&profile_f); profile_f.close(); return profile_pb; } @@ -321,12 +468,13 @@ class DeviceTracerImpl : public DeviceTracer { void Disable() { #ifdef PADDLE_WITH_CUPTI // flush might cause additional calls to DeviceTracker. 
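    // A short sketch of the intent here (an assumption based on the callbacks
    // registered in Enable()): the forced flush synchronously drains CUPTI's
    // activity buffers, so bufferCompleted() may still call AddKernelRecords()
    // and AddMemRecords() on this tracer while Disable() is running; only after
    // that is GenProfile() able to serialize a complete record set, e.g.
    //
    //   tracer->Disable();                                      // flush -> bufferCompleted -> Add*Records
    //   auto profile_pb = tracer->GenProfile("/tmp/profile");   // all records are present now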
- dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED); + CUPTI_CALL( + dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED)); #endif // PADDLE_WITH_CUPTI std::lock_guard l(trace_mu_); #ifdef PADDLE_WITH_CUPTI DisableActivity(); - dynload::cuptiUnsubscribe(subscriber_); + CUPTI_CALL(dynload::cuptiUnsubscribe(subscriber_)); CUPTI_CALL(dynload::cuptiGetTimestamp(&end_ns_)); #endif // PADDLE_WITH_CUPTI enabled_ = false; @@ -337,18 +485,10 @@ class DeviceTracerImpl : public DeviceTracer { static void CUPTIAPI ApiCallback(void *userdata, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const void *cbdata) { auto *cbInfo = reinterpret_cast(cbdata); - DeviceTracer *tracer = reinterpret_cast(userdata); - - if ((domain == CUPTI_CB_DOMAIN_DRIVER_API) && - (cbid == CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel)) { - if (cbInfo->callbackSite == CUPTI_API_ENTER) { - const std::string anno = !annotation_stack.empty() - ? annotation_stack.back() - : cbInfo->symbolName; - tracer->AddAnnotation(cbInfo->correlationId, anno); - } - } else { - VLOG(1) << "Unhandled API Callback for " << domain << " " << cbid; + DeviceTracerImpl *tracer = reinterpret_cast(userdata); + if (cbInfo->callbackSite == CUPTI_API_ENTER) { + Event *event = CurAnnotation(); + tracer->AddAnnotation(cbInfo->correlationId, event); } } CUpti_SubscriberHandle subscriber_; @@ -357,10 +497,12 @@ class DeviceTracerImpl : public DeviceTracer { bool enabled_; uint64_t start_ns_; uint64_t end_ns_; - std::vector kernel_records_; - std::vector mem_records_; - std::vector cpu_records_; - std::unordered_map correlations_; + std::forward_list kernel_records_; + std::forward_list mem_records_; + std::forward_list> cpu_records_; + std::forward_list>> + correlations_pairs; + std::unordered_map correlations_; }; void CreateTracer(DeviceTracer **t) { *t = new DeviceTracerImpl(); } @@ -370,21 +512,104 @@ DeviceTracer *GetDeviceTracer() { return tracer; } -void SetCurAnnotation(const std::string &anno) { - annotation_stack.push_back(anno); -} +void SetCurAnnotation(Event *event) { annotation_stack.push_back(event); } void ClearCurAnnotation() { annotation_stack.pop_back(); } -std::string CurAnnotation() { - if (annotation_stack.empty()) return ""; +Event *CurAnnotation() { + if (annotation_stack.empty()) return nullptr; return annotation_stack.back(); } +std::string CurAnnotationName() { + if (annotation_stack.empty()) return ""; + return annotation_stack.back()->name(); +} void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); } void ClearCurBlock() { block_id_stack.pop_back(); } int BlockDepth() { return block_id_stack.size(); } + +uint32_t GetCurSystemThreadId() { + std::stringstream ss; + ss << std::this_thread::get_id(); + uint32_t id = static_cast(std::stoull(ss.str())); + return id; +} + +void RecoreCurThreadId(int32_t id) { + auto gid = GetCurSystemThreadId(); + VLOG(1) << "RecoreCurThreadId: " << gid << " -> " << id; + system_thread_id_map[gid] = id; +} + +int32_t GetThreadIdFromSystemThreadId(uint32_t id) { + auto it = system_thread_id_map.find(id); + if (it != system_thread_id_map.end()) return it->second; + // return origin id if no event is recorded in this thread. 
+ return static_cast(id); +} + +#ifdef PADDLE_WITH_CUPTI +namespace { + +void initCuptiCbidStr() { + static bool called = false; + if (called) return; + called = true; +#define REGISTER_RUNTIME_CBID_STR(cbid) \ + runtime_cbid_str[CUPTI_RUNTIME_TRACE_CBID_##cbid] = #cbid + + REGISTER_RUNTIME_CBID_STR(cudaBindTexture_v3020); + REGISTER_RUNTIME_CBID_STR(cudaConfigureCall_v3020); + REGISTER_RUNTIME_CBID_STR(cudaDeviceGetAttribute_v5000); + REGISTER_RUNTIME_CBID_STR(cudaDeviceGetStreamPriorityRange_v5050); + REGISTER_RUNTIME_CBID_STR(cudaDeviceSynchronize_v3020); + REGISTER_RUNTIME_CBID_STR(cudaDriverGetVersion_v3020); + REGISTER_RUNTIME_CBID_STR(cudaEventCreateWithFlags_v3020); + REGISTER_RUNTIME_CBID_STR(cudaEventDestroy_v3020); + REGISTER_RUNTIME_CBID_STR(cudaEventDestroy_v3020); + REGISTER_RUNTIME_CBID_STR(cudaEventQuery_v3020); + REGISTER_RUNTIME_CBID_STR(cudaEventRecord_v3020); + REGISTER_RUNTIME_CBID_STR(cudaFreeHost_v3020); + REGISTER_RUNTIME_CBID_STR(cudaFree_v3020); + REGISTER_RUNTIME_CBID_STR(cudaFuncGetAttributes_v3020); + REGISTER_RUNTIME_CBID_STR(cudaGetDeviceCount_v3020); + REGISTER_RUNTIME_CBID_STR(cudaGetDeviceProperties_v3020); + REGISTER_RUNTIME_CBID_STR(cudaGetDevice_v3020); + REGISTER_RUNTIME_CBID_STR(cudaGetErrorString_v3020); + REGISTER_RUNTIME_CBID_STR(cudaGetLastError_v3020); + REGISTER_RUNTIME_CBID_STR(cudaHostAlloc_v3020); + REGISTER_RUNTIME_CBID_STR(cudaHostGetDevicePointer_v3020); + REGISTER_RUNTIME_CBID_STR(cudaLaunchKernel_v7000); + REGISTER_RUNTIME_CBID_STR(cudaMallocHost_v3020); + REGISTER_RUNTIME_CBID_STR(cudaMalloc_v3020); + REGISTER_RUNTIME_CBID_STR(cudaMemcpyAsync_v3020); + REGISTER_RUNTIME_CBID_STR(cudaMemcpy_v3020); + REGISTER_RUNTIME_CBID_STR(cudaMemsetAsync_v3020); + REGISTER_RUNTIME_CBID_STR(cudaMemset_v3020); + REGISTER_RUNTIME_CBID_STR( + cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags_v7000); + REGISTER_RUNTIME_CBID_STR(cudaPeekAtLastError_v3020); + REGISTER_RUNTIME_CBID_STR(cudaRuntimeGetVersion_v3020); + REGISTER_RUNTIME_CBID_STR(cudaSetDevice_v3020); + REGISTER_RUNTIME_CBID_STR(cudaStreamCreate_v3020); + REGISTER_RUNTIME_CBID_STR(cudaStreamCreateWithFlags_v5000); + REGISTER_RUNTIME_CBID_STR(cudaStreamCreateWithPriority_v5050); + REGISTER_RUNTIME_CBID_STR(cudaStreamDestroy_v5050); + REGISTER_RUNTIME_CBID_STR(cudaStreamSynchronize_v3020); + REGISTER_RUNTIME_CBID_STR(cudaStreamWaitEvent_v3020); + REGISTER_RUNTIME_CBID_STR(cudaUnbindTexture_v3020); +#if CUDA_VERSION >= 9000 + REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernel_v9000); + REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernelMultiDevice_v9000); +#endif + +#undef REGISTER_RUNTIME_CBID_STR +} +} // namespace +#endif // PADDLE_WITH_CUPTI + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h index bf0786be2d..6ee2c36146 100644 --- a/paddle/fluid/platform/device_tracer.h +++ b/paddle/fluid/platform/device_tracer.h @@ -32,6 +32,8 @@ inline uint64_t PosixInNsec() { return 1000 * (static_cast(tv.tv_sec) * 1000000 + tv.tv_usec); } +class Event; + // DeviceTracer performs the following tasks: // 1. Register cuda callbacks for various events: kernel, memcpy, etc. // 2. Collect cuda statistics: start/end ts, memory, etc. @@ -68,11 +70,13 @@ class DeviceTracer { virtual void Enable() = 0; // Needs to be called once after use. virtual void Disable() = 0; + // Needs to be called once before reuse. + virtual void Reset() = 0; // Add a pair to correlate internal cuda id with high level - // annotation (string). 
So cuda statistics can be represented by + // annotation event(with string). So cuda statistics can be represented by // human-readable annotations. - virtual void AddAnnotation(uint64_t id, const std::string& anno) = 0; + virtual void AddAnnotation(uint32_t id, Event* event) = 0; virtual void AddMemRecords(const std::string& name, uint64_t start_ns, uint64_t end_ns, int64_t device_id, @@ -92,6 +96,9 @@ class DeviceTracer { // Generate a proto after done (Disabled). virtual proto::Profile GenProfile(const std::string& profile_path) = 0; + // generate kernel elapsed time into Event + virtual void GenEventKernelCudaElapsedTime() = 0; + virtual bool IsEnabled() = 0; }; @@ -99,14 +106,19 @@ class DeviceTracer { DeviceTracer* GetDeviceTracer(); // Set a name for the cuda kernel operation being launched by the thread. -void SetCurAnnotation(const std::string& anno); +void SetCurAnnotation(Event* event); // Clear the name after the operation is done. void ClearCurAnnotation(); // Current name of the operation being run in the thread. -std::string CurAnnotation(); +std::string CurAnnotationName(); +Event* CurAnnotation(); void SetCurBlock(int block_id); void ClearCurBlock(); int BlockDepth(); + +// Set current thread id, so we can map the system thread id to thread id. +void RecoreCurThreadId(int32_t id); +int32_t GetThreadIdFromSystemThreadId(uint32_t id); } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/init.cc b/paddle/fluid/platform/init.cc index ac86b38a61..4dcf7e7904 100644 --- a/paddle/fluid/platform/init.cc +++ b/paddle/fluid/platform/init.cc @@ -22,6 +22,7 @@ limitations under the License. */ #include "paddle/fluid/string/split.h" #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cuda_device_guard.h" +#include "paddle/fluid/platform/dynload/cupti.h" #endif #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/init.h" @@ -30,6 +31,9 @@ limitations under the License. */ DEFINE_int32(paddle_num_threads, 1, "Number of threads for each paddle instance."); +DEFINE_int32(multiple_of_cupti_buffer_size, 1, + "Multiple of the CUPTI device buffer size. If the timestamps have " + "been dropped when you are profiling, try increasing this value."); namespace paddle { namespace framework { @@ -78,7 +82,32 @@ void InitP2P(std::vector devices) { #endif } +void InitCupti() { +#ifdef PADDLE_WITH_CUPTI + if (FLAGS_multiple_of_cupti_buffer_size == 1) return; + size_t attrValue = 0, attrValueSize = sizeof(size_t); +#define MULTIPLY_ATTR_VALUE(attr) \ + { \ + PADDLE_ENFORCE(!platform::dynload::cuptiActivityGetAttribute( \ + attr, &attrValueSize, &attrValue)); \ + attrValue *= FLAGS_multiple_of_cupti_buffer_size; \ + LOG(WARNING) << "Set " #attr " " << attrValue << " byte"; \ + PADDLE_ENFORCE(!platform::dynload::cuptiActivitySetAttribute( \ + attr, &attrValueSize, &attrValue)); \ + } + MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE); + MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP); +#if CUDA_VERSION >= 9000 + MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE); +#endif +#undef MULTIPLY_ATTR_VALUE +#endif +} + void InitDevices(bool init_p2p) { + // CUPTI attribute should be set before any CUDA context is created (see CUPTI + // documentation about CUpti_ActivityAttribute). 
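  // (Illustrative note, not part of this patch: the new flag defined above is
  //  exposed through gflags' environment lookup, so an assumed invocation when
  //  CUPTI drops timestamps during profiling would look like
  //    FLAGS_multiple_of_cupti_buffer_size=4 python train.py
  //  which multiplies the CUPTI device buffer sizes before any CUDA context
  //  is created.)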
+ InitCupti(); /*Init all available devices by default */ std::vector devices; #ifdef PADDLE_WITH_CUDA diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 85977366e6..436654d102 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/platform/profiler.h" + #include #include #include @@ -27,7 +29,6 @@ limitations under the License. */ #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/platform/device_tracer.h" #include "paddle/fluid/platform/port.h" -#include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/string/printf.h" DEFINE_bool(enable_rpc_profiler, false, "Enable rpc profiler or not."); @@ -66,12 +67,13 @@ struct EventList { ((kEventSize + kEventAlign - 1) / kEventAlign * kEventAlign); template - void Record(Args&&... args) { + Event* Record(Args&&... args) { if (event_blocks.empty() || event_blocks.front().size() == kNumBlock) { event_blocks.emplace_front(); event_blocks.front().reserve(kNumBlock); } event_blocks.front().emplace_back(std::forward(args)...); + return &event_blocks.front().back(); } std::vector Reduce() { @@ -98,21 +100,8 @@ inline uint64_t GetTimeInNsec() { .count(); } -Event::Event(EventType type, std::string name, uint32_t thread_id, - const DeviceContext* dev_ctx) - : type_(type), name_(name), thread_id_(thread_id), has_cuda_(false) { -#ifdef PADDLE_WITH_CUDA - has_cuda_ = dev_ctx ? platform::is_gpu_place(dev_ctx->GetPlace()) : false; - if (has_cuda_) { - auto* cuda_dev_ctx = static_cast(dev_ctx); - PADDLE_ENFORCE(cudaSetDevice( - boost::get(cuda_dev_ctx->GetPlace()).device)); - PADDLE_ENFORCE(cudaGetDevice(&device_)); - PADDLE_ENFORCE(cudaEventCreate(&event_)); - auto stream = cuda_dev_ctx->stream(); - PADDLE_ENFORCE(cudaEventRecord(event_, stream)); - } -#endif +Event::Event(EventType type, std::string name, uint32_t thread_id) + : type_(type), name_(name), thread_id_(thread_id) { cpu_ns_ = GetTimeInNsec(); } @@ -124,88 +113,70 @@ double Event::CpuElapsedMs(const Event& e) const { double Event::CudaElapsedMs(const Event& e) const { #ifdef PADDLE_WITH_CUDA - if (!has_cuda_) return 0.0; - PADDLE_ENFORCE(e.has_cuda() && has_cuda()); - PADDLE_ENFORCE(e.device() == device()); - PADDLE_ENFORCE(cudaEventSynchronize(event_)); - PADDLE_ENFORCE(cudaEventSynchronize(e.event())); - float ms; - PADDLE_ENFORCE(cudaEventElapsedTime(&ms, event_, e.event())); - return ms; +#ifdef PADDLE_WITH_CUPTI + return gpu_ns_ / 1000000.0; +#endif #else PADDLE_THROW("CUDA is not enabled"); #endif } -#ifdef PADDLE_WITH_CUDA -static void ForEachDevice(std::function func) { - auto original_device = GetCurrentDeviceId(); - int count = GetCUDADeviceCount(); - for (int i = 0; i < count; i++) { - SetDeviceId(i); - func(i); - } - SetDeviceId(original_device); -} -#endif - inline EventList& GetEventList() { if (!g_event_list) { std::lock_guard guard(g_all_event_lists_mutex); g_event_list = std::make_shared(); g_thread_id = g_next_thread_id++; g_all_event_lists.emplace_front(g_event_list); + RecoreCurThreadId(g_thread_id); } return *g_event_list; } -void Mark(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventType::kMark, name, g_thread_id, dev_ctx); +void Mark(const std::string& name) { + GetEventList().Record(EventType::kMark, name, g_thread_id); } -void 
PushEvent(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventType::kPushRange, name, g_thread_id, dev_ctx); +Event* PushEvent(const std::string& name) { + return GetEventList().Record(EventType::kPushRange, name, g_thread_id); } -void PopEvent(const std::string& name, const DeviceContext* dev_ctx) { - GetEventList().Record(EventType::kPopRange, name, g_thread_id, dev_ctx); +void PopEvent(const std::string& name) { + GetEventList().Record(EventType::kPopRange, name, g_thread_id); } -RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx) +RecordEvent::RecordEvent(const std::string& name) : is_enabled_(false), start_ns_(PosixInNsec()) { if (g_state == ProfilerState::kDisabled) return; - std::lock_guard l(profiler_mu); + // lock is not needed, the code below is thread-safe is_enabled_ = true; - dev_ctx_ = dev_ctx; name_ = name; - PushEvent(name_, dev_ctx_); + Event* e = PushEvent(name_); // Maybe need the same push/pop behavior. - SetCurAnnotation(name_); + SetCurAnnotation(e); } RecordEvent::~RecordEvent() { if (g_state == ProfilerState::kDisabled || !is_enabled_) return; - std::lock_guard l(profiler_mu); + // lock is not needed, the code below is thread-safe DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { - tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec(), + tracer->AddCPURecords(CurAnnotationName(), start_ns_, PosixInNsec(), BlockDepth(), g_thread_id); } ClearCurAnnotation(); - PopEvent(name_, dev_ctx_); + PopEvent(name_); } -RecordRPCEvent::RecordRPCEvent(const std::string& name, - const DeviceContext* dev_ctx) { +RecordRPCEvent::RecordRPCEvent(const std::string& name) { if (FLAGS_enable_rpc_profiler) { - event_.reset(new platform::RecordEvent(name, dev_ctx)); + event_.reset(new platform::RecordEvent(name)); } } RecordBlock::RecordBlock(int block_id) : is_enabled_(false), start_ns_(PosixInNsec()) { - std::lock_guard l(profiler_mu); + // lock is not needed, the code below is thread-safe if (g_state == ProfilerState::kDisabled) return; is_enabled_ = true; SetCurBlock(block_id); @@ -213,7 +184,7 @@ RecordBlock::RecordBlock(int block_id) } RecordBlock::~RecordBlock() { - std::lock_guard l(profiler_mu); + // lock is not needed, the code below is thread-safe if (g_state == ProfilerState::kDisabled || !is_enabled_) return; DeviceTracer* tracer = GetDeviceTracer(); if (tracer) { @@ -225,11 +196,21 @@ RecordBlock::~RecordBlock() { ClearCurBlock(); } +void SynchronizeAllDevice() { +#ifdef PADDLE_WITH_CUDA + int count = GetCUDADeviceCount(); + for (int i = 0; i < count; i++) { + SetDeviceId(i); + PADDLE_ENFORCE(cudaDeviceSynchronize()); + } +#endif +} + void EnableProfiler(ProfilerState state) { PADDLE_ENFORCE(state != ProfilerState::kDisabled, "Can't enable profiling, since the input state is ", "ProfilerState::kDisabled"); - + SynchronizeAllDevice(); std::lock_guard l(profiler_mu); if (state == g_state) { return; @@ -238,23 +219,20 @@ void EnableProfiler(ProfilerState state) { should_send_profile_state = true; GetDeviceTracer()->Enable(); #ifdef PADDLE_WITH_CUDA - if (g_state == ProfilerState::kCUDA) { + if (g_state == ProfilerState::kCUDA || g_state == ProfilerState::kAll || + g_state == ProfilerState::kCPU) { // Generate some dummy events first to reduce the startup overhead. 
- for (int i = 0; i < 5; i++) { - ForEachDevice([](int d) { - DeviceContext* dev_ctx = new CUDADeviceContext(CUDAPlace(d)); - Mark("_cuda_startup_", dev_ctx); - dev_ctx->Wait(); - delete dev_ctx; - }); - } + DummyKernelAndEvent(); + GetDeviceTracer()->Reset(); } #endif // Mark the profiling start. - Mark("_start_profiler_", nullptr); + Mark("_start_profiler_"); } void ResetProfiler() { + SynchronizeAllDevice(); + GetDeviceTracer()->Reset(); std::lock_guard guard(g_all_event_lists_mutex); for (auto it = g_all_event_lists.begin(); it != g_all_event_lists.end(); ++it) { @@ -481,20 +459,23 @@ void ParseEvents(const std::vector>& events, void DisableProfiler(EventSortingKey sorted_key, const std::string& profile_path) { + SynchronizeAllDevice(); std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; // Mark the profiling stop. - Mark("_stop_profiler_", nullptr); + Mark("_stop_profiler_"); - std::vector> all_events = GetAllEvents(); - ParseEvents(all_events, true, sorted_key); - ParseEvents(all_events, false, sorted_key); - ResetProfiler(); DeviceTracer* tracer = GetDeviceTracer(); if (tracer->IsEnabled()) { tracer->Disable(); tracer->GenProfile(profile_path); + tracer->GenEventKernelCudaElapsedTime(); } + + std::vector> all_events = GetAllEvents(); + ParseEvents(all_events, true, sorted_key); + ParseEvents(all_events, false, sorted_key); + ResetProfiler(); g_state = ProfilerState::kDisabled; should_send_profile_state = true; } diff --git a/paddle/fluid/platform/profiler.cu b/paddle/fluid/platform/profiler.cu new file mode 100644 index 0000000000..e115c554ca --- /dev/null +++ b/paddle/fluid/platform/profiler.cu @@ -0,0 +1,50 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/profiler.h" + +#include + +namespace paddle { +namespace platform { + +__global__ void DummyKernel(int *a) { a[0] = 0; } + +static void ForEachDevice(std::function func) { + auto original_device = GetCurrentDeviceId(); + int count = GetCUDADeviceCount(); + for (int i = 0; i < count; i++) { + SetDeviceId(i); + func(i); + } + SetDeviceId(original_device); +} + +void DummyKernelAndEvent() { + for (int i = 0; i < 5; i++) { + ForEachDevice([](int d) { + CUDADeviceContext *dev_ctx = new CUDADeviceContext(CUDAPlace(d)); + Mark("_cuda_startup_"); + int *ptr; + PADDLE_ENFORCE(cudaMalloc(&ptr, sizeof(int))); + DummyKernel<<<1, 1, 0, dev_ctx->stream()>>>(ptr); + dev_ctx->Wait(); + PADDLE_ENFORCE(cudaFree(ptr)); + delete dev_ctx; + }); + } +} + +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index f5d3490634..55d94f0fd8 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -28,17 +28,17 @@ class Event { public: // The DeviceContext is used to get the cuda stream. // If CPU profiling mode, can pass nullptr. 
- Event(EventType type, std::string name, uint32_t thread_id, - const DeviceContext* dev_ctx); + Event(EventType type, std::string name, uint32_t thread_id); const EventType& type() const; std::string name() const { return name_; } uint32_t thread_id() const { return thread_id_; } - bool has_cuda() const { return has_cuda_; } #ifdef PADDLE_WITH_CUDA +#ifndef PADDLE_WITH_CUPTI cudaEvent_t event() const { return event_; } int device() const { return device_; } +#endif #endif double CpuElapsedMs(const Event& e) const; @@ -49,11 +49,21 @@ class Event { std::string name_; uint32_t thread_id_; int64_t cpu_ns_; - bool has_cuda_; #ifdef PADDLE_WITH_CUDA +#ifdef PADDLE_WITH_CUPTI + int64_t gpu_ns_ = 0; + + public: + void AddCudaElapsedTime(int64_t start_ns, int64_t end_ns) { + gpu_ns_ += end_ns - start_ns; + } + + private: +#else cudaEvent_t event_ = nullptr; int device_ = -1; #endif +#endif }; enum ProfilerState { @@ -63,22 +73,19 @@ enum ProfilerState { kAll, // Profile both CPU and GPU. (Currently experimental). }; -void Mark(const std::string& name, const DeviceContext* dev_ctx); +void Mark(const std::string& name); -void PushEvent(const std::string& name, const DeviceContext* dev_ctx); +Event* PushEvent(const std::string& name); -void PopEvent(const std::string& name, const DeviceContext* dev_ctx); +void PopEvent(const std::string& name); struct RecordEvent { - // dev_ctx can be set to nullptr if device is cpu. - RecordEvent(const std::string& name, const DeviceContext* dev_ctx); + explicit RecordEvent(const std::string& name); ~RecordEvent(); bool is_enabled_; uint64_t start_ns_; - // The device context is used by Event to get the current cuda stream. - const DeviceContext* dev_ctx_; // Event name std::string name_; // Need to distinguish name by op type, block_id, program_id and perhaps @@ -88,8 +95,7 @@ struct RecordEvent { class RecordRPCEvent { public: - // dev_ctx can be set to nullptr if device is cpu. 
- RecordRPCEvent(const std::string& name, const DeviceContext* dev_ctx); + explicit RecordRPCEvent(const std::string& name); ~RecordRPCEvent() {} private: @@ -132,5 +138,9 @@ bool ShouldSendProfileState(); void SetProfileListener(); int64_t ListenerId(); +#ifdef PADDLE_WITH_CUDA +void DummyKernelAndEvent(); +#endif + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/profiler.proto b/paddle/fluid/platform/profiler.proto index 7b42aa785e..e761d7b266 100644 --- a/paddle/fluid/platform/profiler.proto +++ b/paddle/fluid/platform/profiler.proto @@ -31,6 +31,7 @@ message Event { optional int64 sub_device_id = 6; optional MemCopy memcopy = 7; + optional string detail_info = 9; } message Profile { diff --git a/paddle/fluid/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc index 61f467814b..528fe03c67 100644 --- a/paddle/fluid/platform/profiler_test.cc +++ b/paddle/fluid/platform/profiler_test.cc @@ -23,76 +23,49 @@ TEST(Event, CpuElapsedTime) { using paddle::platform::Event; using paddle::platform::EventType; - Event start_event(EventType::kPushRange, "test", 0, nullptr); - EXPECT_TRUE(start_event.has_cuda() == false); + Event start_event(EventType::kPushRange, "test", 0); int counter = 0; while (counter != 1000) { counter++; } - Event stop_event(EventType::kPopRange, "test", 0, nullptr); + Event stop_event(EventType::kPopRange, "test", 0); EXPECT_GT(start_event.CpuElapsedMs(stop_event), 0); } -#ifdef PADDLE_WITH_CUDA -TEST(Event, CudaElapsedTime) { - using paddle::platform::DeviceContext; - using paddle::platform::CUDADeviceContext; - using paddle::platform::CUDAPlace; - using paddle::platform::Event; - using paddle::platform::EventType; - - DeviceContext* dev_ctx = new CUDADeviceContext(CUDAPlace(0)); - Event start_event(EventType::kPushRange, "test", 0, dev_ctx); - EXPECT_TRUE(start_event.has_cuda() == true); - int counter = 0; - while (counter != 1000) { - counter++; - } - Event stop_event(EventType::kPopRange, "test", 0, dev_ctx); - EXPECT_GT(start_event.CudaElapsedMs(stop_event), 0); -} -#endif - TEST(RecordEvent, RecordEvent) { using paddle::platform::DeviceContext; using paddle::platform::Event; using paddle::platform::EventType; using paddle::platform::RecordEvent; + using paddle::platform::PushEvent; + using paddle::platform::PopEvent; using paddle::platform::ProfilerState; using paddle::platform::EventSortingKey; ProfilerState state = ProfilerState::kCPU; - DeviceContext* dev_ctx = nullptr; -#ifdef PADDLE_WITH_CUDA - using paddle::platform::CUDADeviceContext; - using paddle::platform::CUDAPlace; - state = ProfilerState::kCUDA; - dev_ctx = - new paddle::platform::CUDADeviceContext(paddle::platform::CUDAPlace(0)); -#endif EnableProfiler(state); /* Usage 1: - * PushEvent(evt_name, dev_ctx); + * PushEvent(evt_name); * ... * code to be analyzed * ... - * PopEvent(evt_name, dev_ctx); + * PopEvent(evt_name); */ LOG(INFO) << "Usage 1: PushEvent & PopEvent"; for (int loop = 0; loop < 3; ++loop) { for (int i = 1; i < 5; ++i) { std::string name = "op_" + std::to_string(i); - PushEvent(name, dev_ctx); + PushEvent(name); int counter = 1; while (counter != i * 1000) counter++; - PopEvent(name, dev_ctx); + PopEvent(name); } } /* Usage 2: * { - * RecordEvent record_event(name, dev_ctx); + * RecordEvent record_event(name); * ... * code to be analyzed * ... 
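   *   (A minimal end-to-end sketch of the DeviceContext-free API exercised
   *    here; the sorting key and profile path are illustrative assumptions:
   *      EnableProfiler(ProfilerState::kCPU);
   *      {
   *        RecordEvent record_event("my_region");  // no DeviceContext* argument
   *        ...  // code to be analyzed
   *      }
   *      DisableProfiler(EventSortingKey::kTotal, "/tmp/profile");
   *    )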
@@ -101,7 +74,7 @@ TEST(RecordEvent, RecordEvent) { LOG(INFO) << "Usage 2: RecordEvent"; for (int i = 1; i < 5; ++i) { std::string name = "evs_op_" + std::to_string(i); - RecordEvent record_event(name, dev_ctx); + RecordEvent record_event(name); int counter = 1; while (counter != i * 1000) counter++; } @@ -123,20 +96,20 @@ TEST(RecordEvent, RecordEvent) { LOG(INFO) << "Usage 3: nested RecordEvent"; for (int i = 1; i < 5; ++i) { std::string name = "ano_evs_op_" + std::to_string(i); - RecordEvent record_event(name, dev_ctx); + RecordEvent record_event(name); int counter = 1; while (counter != i * 100) counter++; { std::string nested_name = "nested_ano_evs_op_" + std::to_string(i); - RecordEvent nested_record_event(nested_name, dev_ctx); + RecordEvent nested_record_event(nested_name); int nested_counter = 1; while (nested_counter != i * 100) nested_counter++; } } // Bad Usage: - PushEvent("event_without_pop", dev_ctx); - PopEvent("event_without_push", dev_ctx); + PushEvent("event_without_pop"); + PopEvent("event_without_push"); std::vector> events = paddle::platform::GetAllEvents(); int cuda_startup_count = 0; diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index aa1f85734d..a9c92efb72 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -131,7 +131,8 @@ def __bootstrap__(): 'eager_delete_tensor_gb', 'fast_eager_deletion_mode', 'allocator_strategy', 'reader_queue_speed_test_mode', 'print_sub_graph_dir', 'pe_profile_fname', 'warpctc_dir', - 'inner_op_parallelism', 'enable_parallel_graph' + 'inner_op_parallelism', 'enable_parallel_graph', + 'multiple_of_cupti_buffer_size' ] if 'Darwin' not in sysstr: read_env_flags.append('use_pinned_memory') diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 7934164b84..39d778b82a 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -16,15 +16,19 @@ from __future__ import print_function import unittest import os +import tempfile import numpy as np import paddle.fluid as fluid import paddle.fluid.profiler as profiler import paddle.fluid.layers as layers import paddle.fluid.core as core +import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2 class TestProfiler(unittest.TestCase): - def net_profiler(self, state, profile_path='/tmp/profile'): + def net_profiler(self, state, use_parallel_executor=False): + profile_path = os.path.join(tempfile.gettempdir(), "profile") + open(profile_path, "w").write("") startup_program = fluid.Program() main_program = fluid.Program() @@ -60,6 +64,11 @@ class TestProfiler(unittest.TestCase): place = fluid.CPUPlace() if state == 'CPU' else fluid.CUDAPlace(0) exe = fluid.Executor(place) exe.run(startup_program) + if use_parallel_executor: + pe = fluid.ParallelExecutor( + state != 'CPU', + loss_name=avg_cost.name, + main_program=main_program) pass_acc_calculator = fluid.average.WeightedAverage() with profiler.profiler(state, 'total', profile_path) as prof: @@ -69,6 +78,9 @@ class TestProfiler(unittest.TestCase): x = np.random.random((32, 784)).astype("float32") y = np.random.randint(0, 10, (32, 1)).astype("int64") + if use_parallel_executor: + pe.run(feed={'x': x, 'y': y}, fetch_list=[avg_cost.name]) + continue outs = exe.run(main_program, feed={'x': x, 'y': y}, @@ -77,21 +89,37 @@ class TestProfiler(unittest.TestCase): b_size = np.array(outs[2]) pass_acc_calculator.add(value=acc, weight=b_size) pass_acc = 
pass_acc_calculator.eval() + data = open(profile_path, 'rb').read() + self.assertGreater(len(data), 0) + profile_pb = profiler_pb2.Profile() + profile_pb.ParseFromString(data) + self.assertGreater(len(profile_pb.events), 0) + for event in profile_pb.events: + if event.type == profiler_pb2.Event.GPUKernel: + if not event.detail_info and not event.name.startswith("MEM"): + raise Exception( + "Kernel %s missing event. Has this kernel been recorded by RecordEvent?" + % event.name) + elif event.type == profiler_pb2.Event.CPU and ( + event.name.startswith("Driver API") or + event.name.startswith("Runtime API")): + print("Warning: unregister", event.name) def test_cpu_profiler(self): self.net_profiler('CPU') + self.net_profiler('CPU', use_parallel_executor=True) @unittest.skipIf(not core.is_compiled_with_cuda(), "profiler is enabled only with GPU") def test_cuda_profiler(self): self.net_profiler('GPU') + self.net_profiler('GPU', use_parallel_executor=True) @unittest.skipIf(not core.is_compiled_with_cuda(), "profiler is enabled only with GPU") def test_all_profiler(self): - self.net_profiler('All', '/tmp/profile_out') - with open('/tmp/profile_out', 'rb') as f: - self.assertGreater(len(f.read()), 0) + self.net_profiler('All') + self.net_profiler('All', use_parallel_executor=True) if __name__ == '__main__': diff --git a/tools/timeline.py b/tools/timeline.py index f850476831..ebadb29bdb 100644 --- a/tools/timeline.py +++ b/tools/timeline.py @@ -131,8 +131,12 @@ class Timeline(object): if (k, event.device_id, "CPU") not in self._devices: pid = self._allocate_pid() self._devices[(k, event.device_id, "CPU")] = pid - self._chrome_trace.emit_pid("%s:cpu:block:%d" % - (k, event.device_id), pid) + # -1 device id represents CUDA api call + if event.device_id == -1: + self._chrome_trace.emit_pid("%s:cuda_api" % k, pid) + else: + self._chrome_trace.emit_pid( + "%s:cpu:block:%d" % (k, event.device_id), pid) elif event.type == profiler_pb2.Event.GPUKernel: if (k, event.device_id, "GPUKernel") not in self._devices: pid = self._allocate_pid() @@ -150,7 +154,9 @@ class Timeline(object): pid = self._devices[(k, event.device_id, type)] args = {'name': event.name} if event.memcopy.bytes > 0: - args = {'mem_bytes': event.memcopy.bytes} + args['mem_bytes'] = event.memcopy.bytes + if event.detail_info: + args['detail_info'] = event.detail_info # TODO(panyx0718): Chrome tracing only handles ms. However, some # ops takes micro-seconds. Hence, we keep the ns here. 
self._chrome_trace.emit_region( @@ -173,7 +179,7 @@ if args.timeline_path: profile_paths = profile_path.split(',') profile_dict = dict() if len(profile_paths) == 1: - with open(profile_path, 'r') as f: + with open(profile_path, 'rb') as f: profile_s = f.read() profile_pb = profiler_pb2.Profile() profile_pb.ParseFromString(profile_s) @@ -181,7 +187,7 @@ if len(profile_paths) == 1: else: for profile_path in profile_paths: k, v = profile_path.split('=') - with open(v, 'r') as f: + with open(v, 'rb') as f: profile_s = f.read() profile_pb = profiler_pb2.Profile() profile_pb.ParseFromString(profile_s) From 646b1f014802a50c2bb5bb53954177d25b68e8e4 Mon Sep 17 00:00:00 2001 From: Wu Yi Date: Thu, 21 Feb 2019 13:00:15 +0800 Subject: [PATCH 099/117] Add manylinux cuda10 (#15787) * add cuda10 * add manylinux cuda10 test=develop --- tools/manylinux1/build_all.sh | 5 +++++ tools/manylinux1/build_scripts/build.sh | 12 +++++++----- tools/manylinux1/build_scripts/build_utils.sh | 2 ++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/tools/manylinux1/build_all.sh b/tools/manylinux1/build_all.sh index 097bedb526..caf2172215 100755 --- a/tools/manylinux1/build_all.sh +++ b/tools/manylinux1/build_all.sh @@ -24,3 +24,8 @@ sed 's//9.0-cudnn7-devel-centos6/g' Dockerfile.x64 | \ sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_70,code=sm_70"/g'> Dockerfile.tmp docker build -t ${REPO}/paddle_manylinux_devel:cuda9.0_cudnn7 -f Dockerfile.tmp . docker push ${REPO}/paddle_manylinux_devel:cuda9.0_cudnn7 + +sed 's//10.0-devel-centos6/g' Dockerfile.x64 | \ +sed 's//NVCC_GENCODE="-gencode=arch=compute_35,code=sm_35 -gencode=arch=compute_50,code=sm_50 -gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75"/g'> Dockerfile.tmp +docker build -t ${REPO}/paddle_manylinux_devel:cuda10.0_cudnn7 -f Dockerfile.tmp . +docker push ${REPO}/paddle_manylinux_devel:cuda10.0_cudnn7 diff --git a/tools/manylinux1/build_scripts/build.sh b/tools/manylinux1/build_scripts/build.sh index 5b676c0243..1b0059a8c6 100644 --- a/tools/manylinux1/build_scripts/build.sh +++ b/tools/manylinux1/build_scripts/build.sh @@ -107,11 +107,13 @@ curl-config --features rm -rf /usr/local/ssl # Install patchelf (latest with unreleased bug fixes) -curl -sLO https://nixos.org/releases/patchelf/patchelf-0.9/patchelf-0.9.tar.gz -check_sha256sum patchelf-0.9.tar.gz $PATCHELF_HASH -tar -xzf patchelf-0.9.tar.gz -(cd patchelf-0.9 && ./configure && make && make install) -rm -rf patchelf-0.9.tar.gz patchelf-0.9 +# FIXME(typhoonzero): restore this when the link is fixed. 
+# curl -sLO http://nipy.bic.berkeley.edu/manylinux/patchelf-0.9njs2.tar.gz +# check_sha256sum patchelf-0.9njs2.tar.gz $PATCHELF_HASH +# tar -xzf patchelf-0.9njs2.tar.gz +# (cd patchelf-0.9njs2 && ./configure && make && make install) +# rm -rf patchelf-0.9njs2.tar.gz patchelf-0.9njs2 +yum install -y patchelf # Install latest pypi release of auditwheel LD_LIBRARY_PATH="${ORIGINAL_LD_LIBRARY_PATH}:$(dirname ${PY35_BIN})/lib" $PY35_BIN/pip install auditwheel diff --git a/tools/manylinux1/build_scripts/build_utils.sh b/tools/manylinux1/build_scripts/build_utils.sh index 48cce15a14..083101249c 100755 --- a/tools/manylinux1/build_scripts/build_utils.sh +++ b/tools/manylinux1/build_scripts/build_utils.sh @@ -87,6 +87,8 @@ function do_cpython_build { # NOTE Make libpython shared library visible to python calls below LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/python get-pip.py LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/pip install wheel + cd / + ls ${MY_DIR} local abi_tag=$(LD_LIBRARY_PATH="${prefix}/lib" ${prefix}/bin/python ${MY_DIR}/python-tag-abi-tag.py) ln -s ${prefix} /opt/python/${abi_tag} } From 1c7bb0e40cacd10bfa210b2b922c18207d59f541 Mon Sep 17 00:00:00 2001 From: Dun Liang Date: Thu, 21 Feb 2019 16:43:24 +0800 Subject: [PATCH 100/117] test=develop --- paddle/fluid/platform/profiler.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 436654d102..9617d91b76 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -112,12 +112,10 @@ double Event::CpuElapsedMs(const Event& e) const { } double Event::CudaElapsedMs(const Event& e) const { -#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUPTI return gpu_ns_ / 1000000.0; #endif -#else - PADDLE_THROW("CUDA is not enabled"); + PADDLE_THROW("CUDA CUPTI is not enabled"); #endif } From c9080f516b3b3afffc97899ee03db469ce38d3db Mon Sep 17 00:00:00 2001 From: Dun Liang Date: Thu, 21 Feb 2019 16:44:33 +0800 Subject: [PATCH 101/117] test=develop --- paddle/fluid/platform/profiler.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 9617d91b76..42a93ad76c 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -114,7 +114,7 @@ double Event::CpuElapsedMs(const Event& e) const { double Event::CudaElapsedMs(const Event& e) const { #ifdef PADDLE_WITH_CUPTI return gpu_ns_ / 1000000.0; -#endif +#else PADDLE_THROW("CUDA CUPTI is not enabled"); #endif } From 35a90e06bf66d56684c8fc30bd74d7245443f85f Mon Sep 17 00:00:00 2001 From: Dun Liang Date: Thu, 21 Feb 2019 17:03:16 +0800 Subject: [PATCH 102/117] test=develop --- paddle/fluid/platform/profiler.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 42a93ad76c..28f93b4b12 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -115,7 +115,8 @@ double Event::CudaElapsedMs(const Event& e) const { #ifdef PADDLE_WITH_CUPTI return gpu_ns_ / 1000000.0; #else - PADDLE_THROW("CUDA CUPTI is not enabled"); + LOG_FIRST_N(WARNING, 1) << "CUDA CUPTI is not enabled"; + return 0; #endif } From 1578c60bdda12501e5951aa9b75f6bed39833b22 Mon Sep 17 00:00:00 2001 From: Krzysztof Binias Date: Thu, 21 Feb 2019 12:36:56 +0100 Subject: [PATCH 103/117] Add new ut and remove unnecessary code test=develop --- .../operators/mkldnn/activation_mkldnn_op.cc | 10 --- 
.../mkldnn/test_activation_mkldnn_op.py | 61 ++++++++++++++++++- 2 files changed, 60 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc index e16b6f78d1..223adcaa6b 100644 --- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc @@ -52,11 +52,6 @@ class MKLDNNActivationKernel "Wrong layout/format set for Input x tensor"); Functor functor; - - auto attrs = functor.GetAttrs(); - for (auto &attr : attrs) { - *attr.second = ctx.Attr(attr.first); - } functor(ctx); } }; @@ -76,11 +71,6 @@ class MKLDNNActivationGradKernel "is_test attribute should be set to False in training phase."); Functor functor; - - auto attrs = functor.GetAttrs(); - for (auto &attr : attrs) { - *attr.second = ctx.Attr(attr.first); - } functor(ctx); } }; diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index ad94a4b21c..4c211ef68b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -18,8 +18,8 @@ import unittest import numpy as np import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest -from scipy.special import expit from paddle.fluid.tests.unittests.test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs +import paddle.fluid as fluid class TestMKLDNNReluDim2(TestRelu): @@ -97,5 +97,64 @@ class TestMKLDNNAbsDim4(TestAbs): self.attrs = {"use_mkldnn": True} +# Check if primitives already exist in backward +class TestMKLDNNReluPrimitivesAlreadyExist(unittest.TestCase): + def __assert_close(self, tensor, np_array, msg, atol=1e-4): + self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) + + def test_check_forward_backward(self): + place = core.CPUPlace() + + np.random.seed(123) + x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32) + out = np.abs(x) + + out_grad = np.random.random_sample(x.shape).astype(np.float32) + x_grad = out_grad * np.sign(x) # Abs grad calculation + + var_dict = {'x':x, 'out':out, 'out@GRAD':out_grad, 'x@GRAD':x_grad} + var_names = list(var_dict.keys()) + ground_truth = {name: var_dict[name] for name in var_names} + + program = fluid.Program() + with fluid.program_guard(program): + block = program.global_block() + for name in ground_truth: + block.create_var( + name=name, + dtype='float32', + shape=ground_truth[name].shape) + + relu_op = block.append_op( + type="abs", + inputs={"X": block.var('x'),}, + outputs={"Out": block.var('out') }, + attrs={"use_mkldnn": True}) + + # Generate backward op_desc + grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc( + relu_op.desc, set(), []) + grad_op_desc = grad_op_desc_list[0] + new_op_desc = block.desc.append_op() + new_op_desc.copy_from(grad_op_desc) + for var_name in grad_op_desc.output_arg_names(): + block.desc.var(var_name.encode("ascii")) + grad_op_desc.infer_var_type(block.desc) + grad_op_desc.infer_shape(block.desc) + for arg in grad_op_desc.output_arg_names(): + grad_var = block.desc.find_var(arg.encode("ascii")) + grad_var.set_dtype(core.VarDesc.VarType.FP32) + + exe = fluid.Executor(place) + + # Do at least 2 iterations + for i in range(2): + out = exe.run(program, + feed={name: var_dict[name] for name in ['x', 'out@GRAD']}, + fetch_list=['x@GRAD']) + + self.__assert_close(x_grad, out[0], "x@GRAD") + + if 
__name__ == '__main__': unittest.main() From 543e53db05bc52aa727182267e61efc73205b186 Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Thu, 21 Feb 2019 11:15:44 +0100 Subject: [PATCH 104/117] fix typo releated->related --- paddle/fluid/framework/op_proto_maker.h | 2 +- paddle/fluid/inference/api/analysis_config.cc | 6 +++--- paddle/fluid/inference/api/paddle_analysis_config.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h index 0a0f8f4655..5f3ce60e1d 100644 --- a/paddle/fluid/framework/op_proto_maker.h +++ b/paddle/fluid/framework/op_proto_maker.h @@ -27,7 +27,7 @@ enum class OpRole { kForward = 0x0000, kBackward = 0x0001, kOptimize = 0x0002, - // RPC role is for send/recv releated op + // RPC role is for send/recv related op kRPC = 0x0004, // Dist role is for split_byref/split_selected_rows/concat // used for distributed training. diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index e92273b4dd..522ab49522 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -89,7 +89,7 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(params_file_); CP_MEMBER(model_from_memory_); // the memory model reuses prog_file_ and // params_file_ fields. - // Gpu releated. + // Gpu related. CP_MEMBER(use_gpu_); CP_MEMBER(device_id_); CP_MEMBER(memory_pool_init_size_mb_); @@ -97,13 +97,13 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(enable_memory_optim_); CP_MEMBER(static_memory_optim_); CP_MEMBER(static_memory_optim_force_update_); - // TensorRT releated. + // TensorRT related. CP_MEMBER(use_tensorrt_); CP_MEMBER(tensorrt_workspace_size_); CP_MEMBER(tensorrt_max_batchsize_); CP_MEMBER(tensorrt_min_subgraph_size_); CP_MEMBER(tensorrt_precision_mode_); - // MKLDNN releated. + // MKLDNN related. CP_MEMBER(use_mkldnn_); CP_MEMBER(mkldnn_enabled_op_types_); diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index 47361b3279..c1c6227cdd 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -212,12 +212,12 @@ struct AnalysisConfig { std::string prog_file_; std::string params_file_; - // GPU releated. + // GPU related. bool use_gpu_{false}; int device_id_{0}; uint64_t memory_pool_init_size_mb_{100}; // initial size is 100MB. - // TensorRT releated. + // TensorRT related. 
bool use_tensorrt_{false}; // For workspace_size, refer it from here: // https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#troubleshooting From 8bc604571fea9283434b5fb47f29d1bff844e6bc Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Thu, 21 Feb 2019 11:16:38 +0100 Subject: [PATCH 105/117] fix typo seriazlized->serialized --- paddle/fluid/inference/api/analysis_predictor.cc | 2 +- paddle/fluid/inference/api/analysis_predictor.h | 2 +- paddle/fluid/inference/api/analysis_predictor_tester.cc | 4 ++-- paddle/fluid/inference/api/paddle_api.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 712e010db4..cd6e958779 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -726,7 +726,7 @@ bool AnalysisPredictor::need_collect_var_shapes_for_memory_optim() { return need; } -std::string AnalysisPredictor::GetSeriazlizedProgram() const { +std::string AnalysisPredictor::GetSerializedProgram() const { return inference_program_->Proto()->SerializeAsString(); } diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h index 014df4ee8b..d5445c58e4 100644 --- a/paddle/fluid/inference/api/analysis_predictor.h +++ b/paddle/fluid/inference/api/analysis_predictor.h @@ -74,7 +74,7 @@ class AnalysisPredictor : public PaddlePredictor { void SetMkldnnThreadID(int tid); - std::string GetSeriazlizedProgram() const override; + std::string GetSerializedProgram() const override; protected: // For memory optimization. diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc index 002ba90e40..6696839b53 100644 --- a/paddle/fluid/inference/api/analysis_predictor_tester.cc +++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc @@ -214,8 +214,8 @@ TEST(AnalysisPredictor, memory_optim) { { // The first predictor help to cache the memory optimize strategy. auto predictor = CreatePaddlePredictor(config); - LOG(INFO) << "serialized program: " << predictor->GetSeriazlizedProgram(); - ASSERT_FALSE(predictor->GetSeriazlizedProgram().empty()); + LOG(INFO) << "serialized program: " << predictor->GetSerializedProgram(); + ASSERT_FALSE(predictor->GetSerializedProgram().empty()); // Run several times to check the parameters are not reused by mistake. for (int i = 0; i < 5; i++) { diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index f90a74b910..c9a45b4aa3 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -248,7 +248,7 @@ class PaddlePredictor { /** \brief Get the serialized model program that executes in inference phase. * Its data type is ProgramDesc, which is a protobuf message. */ - virtual std::string GetSeriazlizedProgram() const { + virtual std::string GetSerializedProgram() const { assert(false); // Force raise error. 
return "NotImplemented"; } From 0b926114c0e8b4a1b39b07d931bd59e9c86505ed Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Thu, 21 Feb 2019 14:20:47 +0100 Subject: [PATCH 106/117] add override to ApplyImpl and #pragma once in edited headers add #include in edited headers test=develop --- paddle/fluid/framework/ir/attention_lstm_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.h | 6 ++++-- paddle/fluid/framework/ir/conv_bn_fuse_pass.h | 6 ++++-- .../framework/ir/conv_elementwise_add2_act_fuse_pass.h | 3 ++- .../framework/ir/conv_elementwise_add_act_fuse_pass.h | 3 ++- .../fluid/framework/ir/conv_elementwise_add_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h | 5 ++++- paddle/fluid/framework/ir/fc_fuse_pass.h | 5 ++++- paddle/fluid/framework/ir/fc_gru_fuse_pass.h | 6 ++++-- paddle/fluid/framework/ir/fc_lstm_fuse_pass.h | 8 ++++++-- paddle/fluid/framework/ir/fuse_elewise_add_act_pass.h | 3 ++- paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.h | 3 ++- paddle/fluid/framework/ir/identity_scale_op_clean_pass.h | 3 ++- paddle/fluid/framework/ir/lock_free_optimize_pass.h | 3 ++- .../framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.h | 5 ++++- paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h | 3 ++- paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.h | 3 ++- .../framework/ir/transpose_flatten_concat_fuse_pass.h | 3 ++- 21 files changed, 58 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.h b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.h index a756dfc1b9..39b0585d3a 100644 --- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.h +++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.h @@ -22,7 +22,8 @@ namespace ir { class AttentionLSTMFusePass : public FusePassBase { protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.h b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.h index ad966e11e6..8c3c8b56c0 100644 --- a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.h @@ -31,7 +31,8 @@ class ConvAffineChannelFusePass : public FusePassBase { virtual ~ConvAffineChannelFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"conv_affine_channel_fuse"}; }; @@ -40,7 +41,8 @@ class ConvEltwiseAddAffineChannelFusePass : public FusePassBase { virtual ~ConvEltwiseAddAffineChannelFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"conv_eltwiseadd_affine_channel_fuse"}; }; diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h index 2c9eb574fe..cf425a2730 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h @@ -31,7 +31,8 @@ class ConvBNFusePass : public FusePassBase { virtual ~ConvBNFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr 
ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"conv_bn_fuse"}; }; @@ -40,7 +41,8 @@ class ConvEltwiseAddBNFusePass : public FusePassBase { virtual ~ConvEltwiseAddBNFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"conv_eltwiseadd_bn_fuse"}; }; diff --git a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.h index 3b40a5a926..9259a4ac5c 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.h @@ -25,7 +25,8 @@ class ConvElementwiseAdd2ActFusePass : public FusePassBase { virtual ~ConvElementwiseAdd2ActFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.h index ac69aa6458..9c0b50f155 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.h @@ -25,7 +25,8 @@ class ConvElementwiseAddActFusePass : public FusePassBase { virtual ~ConvElementwiseAddActFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.h index f234603f58..bf43bd5ce2 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.h @@ -25,7 +25,8 @@ class ConvElementwiseAddFusePass : public FusePassBase { virtual ~ConvElementwiseAddFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h index e5ad3067ec..fde2a0a4ee 100644 --- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h +++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.h @@ -14,6 +14,8 @@ #pragma once +#include + #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" @@ -30,7 +32,8 @@ class EmbeddingFCLSTMFusePass : public FusePassBase { virtual ~EmbeddingFCLSTMFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"embedding_fc_lstm_fuse"}; }; diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.h b/paddle/fluid/framework/ir/fc_fuse_pass.h index 6c69539d1e..783a052edc 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.h +++ b/paddle/fluid/framework/ir/fc_fuse_pass.h @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#pragma once + #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" @@ -29,7 +31,8 @@ class FCFusePass : public FusePassBase { virtual ~FCFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.h b/paddle/fluid/framework/ir/fc_gru_fuse_pass.h index 63e1c72bfb..e359a32894 100644 --- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.h +++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.h @@ -30,7 +30,8 @@ class FCGRUFusePass : public FusePassBase { virtual ~FCGRUFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"fc_gru_fuse"}; }; @@ -41,7 +42,8 @@ class MulGRUFusePass : public FusePassBase { virtual ~MulGRUFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"fc_nobias_gru_fuse"}; }; diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h index 3ee32c63a4..21482615a6 100644 --- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h +++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.h @@ -14,6 +14,8 @@ #pragma once +#include + #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" @@ -30,7 +32,8 @@ class FCLstmFusePass : public FusePassBase { virtual ~FCLstmFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"fc_lstm_fuse"}; }; @@ -40,7 +43,8 @@ class MulLstmFusePass : public FusePassBase { virtual ~MulLstmFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"fc_nobias_lstm_fuse"}; }; diff --git a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.h b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.h index b2fecc076e..0fee527447 100644 --- a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.h +++ b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.h @@ -32,7 +32,8 @@ class FuseElewiseAddActPass : public FusePassBase { virtual ~FuseElewiseAddActPass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; std::unique_ptr FuseElewiseAddAct( std::unique_ptr graph, diff --git a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.h b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.h index 6bd653775e..efb49b8300 100644 --- a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.h +++ b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.h @@ -32,7 +32,8 @@ class FuseReluDepthwiseConvPass : public FusePassBase { virtual ~FuseReluDepthwiseConvPass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; std::unique_ptr FuseReluDepthwiseConv( std::unique_ptr graph, bool only_forward) const; }; diff --git a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.h 
b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.h index 50a654d82f..6da592561d 100644 --- a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.h +++ b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.h @@ -22,7 +22,8 @@ namespace ir { class IdentityScaleOpCleanPass : public FusePassBase { protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; private: virtual ~IdentityScaleOpCleanPass() = default; diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.h b/paddle/fluid/framework/ir/lock_free_optimize_pass.h index 7310f596f8..f9157b10d9 100644 --- a/paddle/fluid/framework/ir/lock_free_optimize_pass.h +++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.h @@ -60,7 +60,8 @@ class LockFreeOptimizePass : public Pass { virtual ~LockFreeOptimizePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; private: // Create a new sgd node via current optimizer node diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h index f3ad9f1c2b..0ef5c177bf 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h @@ -29,7 +29,8 @@ class ConvBiasFusePass : public FusePassBase { virtual bool is_conv3d() const { return false; } protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"conv_bias_mkldnn_fuse"}; }; /* diff --git a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.h b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.h index 3f3f0846eb..ede0bea07f 100644 --- a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.h +++ b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.h @@ -31,7 +31,8 @@ class RepeatedFCReluFusePass : public FusePassBase { virtual ~RepeatedFCReluFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"repeated_fc_relu_fuse"}; }; diff --git a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.h b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.h index 9f5fd1a29a..06e18f9dc3 100644 --- a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.h +++ b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.h @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#pragma once + #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/pass.h" @@ -25,7 +27,8 @@ class SeqConcatFcFusePass : public FusePassBase { virtual ~SeqConcatFcFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h index dac9de7193..c36c6b76a2 100644 --- a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h +++ b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h @@ -28,7 +28,8 @@ class SeqConvEltAddReluFusePass : public FusePassBase { virtual ~SeqConvEltAddReluFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"seqconv_eltadd_relu_fuse"}; }; diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h index ba2154045e..a5db3528da 100644 --- a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.h @@ -42,7 +42,8 @@ class SeqPoolConcatFusePass : public FusePassBase { virtual ~SeqPoolConcatFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"seqpool_concat_fuse"}; }; diff --git a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.h b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.h index fb49adc376..c21ba65c40 100644 --- a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.h +++ b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.h @@ -31,7 +31,8 @@ class SquaredMatSubFusePass : public FusePassBase { virtual ~SquaredMatSubFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; const std::string name_scope_{"squared_mat_sub_fuse"}; }; diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h index fb0f0ae9ef..a7d18ec86d 100644 --- a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h +++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.h @@ -30,7 +30,8 @@ class TransposeFlattenConcatFusePass : public FusePassBase { virtual ~TransposeFlattenConcatFusePass() {} protected: - std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; }; } // namespace ir From 1943119fc5f98f6b552ebb6d180346b9c27adb8e Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Thu, 21 Feb 2019 12:58:40 +0100 Subject: [PATCH 107/117] fix typo memeroy->memory test=develop --- paddle/fluid/inference/api/analysis_predictor.cc | 2 +- paddle/fluid/inference/api/api_impl.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index cd6e958779..e8964c4ace 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -392,7 +392,7 @@ std::unique_ptr CreatePaddlePredictor< AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig &config) { VLOG(3) << "create 
AnalysisConfig"; if (config.use_gpu()) { - // 1. GPU memeroy + // 1. GPU memory PADDLE_ENFORCE_GT(config.memory_pool_init_size_mb(), 0.f); PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d", config.gpu_device_id()); diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index e18bc02d92..97c164bdef 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -290,7 +290,7 @@ std::unique_ptr CreatePaddlePredictor< NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) { VLOG(3) << "create NativePaddlePredictor"; if (config.use_gpu) { - // 1. GPU memeroy + // 1. GPU memory PADDLE_ENFORCE_GE( config.fraction_of_gpu_memory, 0.f, "fraction_of_gpu_memory in the config should be set to range (0., 1.]"); From a9bee3a2e28ee2cbd11ec1447c09d21c3c993cb3 Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Wed, 20 Feb 2019 18:02:02 +0100 Subject: [PATCH 108/117] update AUTHORS.md add sfraczek add wojtuss test=develop --- AUTHORS.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/AUTHORS.md b/AUTHORS.md index deafa64120..da91933f46 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -44,6 +44,7 @@ | qingqing01 | Qing-Qing Dang | | reyoung | Yang Yu | | Sand3r- | Michal Gallus | +| sfraczek | Sylwester Fraczek | | Superjom | Chun-Wei Yan | | tensor-tang | Jian Tang | | tianbingsz | Tian-Bing Xu | @@ -54,6 +55,7 @@ | wangyang59 | Yang Wang | | wangzhen-nlp | Zhen Wang | | wen-bo-yang | Wen-Bo Yang | +| wojtuss | Wojciech Uss | | wwhu | Wei-Wei Hu | | xinghai-sun | Xing-Hai Sun | | Xreki | Yi-Qun Liu | From 309ea6f2debdc2821af6cc2a904697bf32ad0730 Mon Sep 17 00:00:00 2001 From: Krzysztof Binias Date: Thu, 21 Feb 2019 15:44:10 +0100 Subject: [PATCH 109/117] Fix for pylint Failed test=develop --- .../mkldnn/test_activation_mkldnn_op.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 4c211ef68b..0f301de47f 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -110,9 +110,9 @@ class TestMKLDNNReluPrimitivesAlreadyExist(unittest.TestCase): out = np.abs(x) out_grad = np.random.random_sample(x.shape).astype(np.float32) - x_grad = out_grad * np.sign(x) # Abs grad calculation + x_grad = out_grad * np.sign(x) # Abs grad calculation - var_dict = {'x':x, 'out':out, 'out@GRAD':out_grad, 'x@GRAD':x_grad} + var_dict = {'x': x, 'out': out, 'out@GRAD': out_grad, 'x@GRAD': x_grad} var_names = list(var_dict.keys()) ground_truth = {name: var_dict[name] for name in var_names} @@ -121,14 +121,12 @@ class TestMKLDNNReluPrimitivesAlreadyExist(unittest.TestCase): block = program.global_block() for name in ground_truth: block.create_var( - name=name, - dtype='float32', - shape=ground_truth[name].shape) - + name=name, dtype='float32', shape=ground_truth[name].shape) + relu_op = block.append_op( type="abs", - inputs={"X": block.var('x'),}, - outputs={"Out": block.var('out') }, + inputs={"X": block.var('x'), }, + outputs={"Out": block.var('out')}, attrs={"use_mkldnn": True}) # Generate backward op_desc @@ -146,11 +144,13 @@ class TestMKLDNNReluPrimitivesAlreadyExist(unittest.TestCase): grad_var.set_dtype(core.VarDesc.VarType.FP32) exe = fluid.Executor(place) - + # Do at least 2 iterations for i in range(2): - out = exe.run(program, 
- feed={name: var_dict[name] for name in ['x', 'out@GRAD']}, + out = exe.run( + program, + feed={name: var_dict[name] + for name in ['x', 'out@GRAD']}, fetch_list=['x@GRAD']) self.__assert_close(x_grad, out[0], "x@GRAD") From e3dd6970fcbc9ae084558c3b3b4b83bc8ab6dc0c Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Thu, 21 Feb 2019 23:21:35 +0800 Subject: [PATCH 110/117] disable dam temporarily (#15860) test=develop --- paddle/fluid/inference/tests/api/CMakeLists.txt | 5 ++++- paddle/fluid/platform/CMakeLists.txt | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 7ecd9e3533..55ab04bfe1 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -60,10 +60,13 @@ set(RNN2_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/rnn2") download_model_and_data(${RNN2_INSTALL_DIR} "rnn2_model.tar.gz" "rnn2_data.txt.tar.gz") inference_analysis_api_test(test_analyzer_rnn2 ${RNN2_INSTALL_DIR} analyzer_rnn2_tester.cc) +# TODO(luotao, Superjom) Disable DAM test, temporarily fix +# https://github.com/PaddlePaddle/Paddle/issues/15032#issuecomment-455990914. +# After inference framework refactor, will reopen it. # normal DAM set(DAM_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/dam") download_model_and_data(${DAM_INSTALL_DIR} "DAM_model.tar.gz" "DAM_data.txt.tar.gz") -inference_analysis_api_test(test_analyzer_dam ${DAM_INSTALL_DIR} analyzer_dam_tester.cc EXTRA_DEPS legacy_allocator SERIAL) +#inference_analysis_api_test(test_analyzer_dam ${DAM_INSTALL_DIR} analyzer_dam_tester.cc EXTRA_DEPS legacy_allocator SERIAL) # small DAM set(DAM_SMALL_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/small_dam") diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 5833fee35b..b7e84031e7 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -87,7 +87,7 @@ nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context) cc_library(timer SRCS timer.cc) cc_test(timer_test SRCS timer_test.cc DEPS timer) -cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS}) +cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto device_context ${GPU_CTX_DEPS}) if(WITH_GPU) nv_library(profiler SRCS profiler.cc profiler.cu DEPS device_context device_tracer) else() From 006c32f93d71091591725f0f6dc6afde33e3545f Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Tue, 19 Feb 2019 14:38:28 +0800 Subject: [PATCH 111/117] polish parameter names parameters within a Layer instance should be unique. 
test=develop --- python/paddle/fluid/imperative/layers.py | 27 +++++++++-- python/paddle/fluid/imperative/nn.py | 37 +++++++------- python/paddle/fluid/layer_helper.py | 3 ++ .../fluid/tests/unittests/test_base_layer.py | 37 ++++++++------ .../fluid/tests/unittests/test_imperative.py | 47 +++++++++--------- .../tests/unittests/test_imperative_gan.py | 30 ++++++------ .../unittests/test_imperative_optimizer.py | 20 ++++---- .../unittests/test_imperative_ptb_rnn.py | 10 +++- .../tests/unittests/test_imperative_resnet.py | 48 ++++++++++++++----- 9 files changed, 161 insertions(+), 98 deletions(-) diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index 59fe6bbf74..46640ce37a 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -17,7 +17,7 @@ import contextlib import sys import numpy as np import collections - +from .. import unique_name from paddle.fluid import core from paddle.fluid import framework from paddle.fluid.imperative import base @@ -26,14 +26,33 @@ __all__ = ['Layer', 'PyLayer'] class Layer(core.Layer): - """Layers composed of operators.""" - - def __init__(self, dtype=core.VarDesc.VarType.FP32, name=None): + """Layers composed of operators. + + Args: + name_scope: prefix name used by the layer to name parameters. + If the prefix is "my_model/layer_1", a parameter name in MyLayer + can be "my_model/layer_1/MyLayer/w_n", where w is the parameter + base name and n is a unique, auto-generated suffix. + dtype: data type for the variables in the layer. + """ + + def __init__(self, name_scope, dtype=core.VarDesc.VarType.FP32): + self._full_name = unique_name.generate(name_scope + "/" + + self.__class__.__name__) self._built = False self._dtype = dtype self._parameters = collections.OrderedDict() self._sub_layers = collections.OrderedDict() + def full_name(self): + """Full name for this layer. + + The full name is composed of name_scope + "/" + MyLayer.__class__.__name__ + + Returns the full name of this layer. + """ + return self._full_name + def parameters(self, include_sublayers=True): """Returns a list of Parameters from current and sub-layers. diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index c86a373ae4..41655c4f54 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -27,6 +27,7 @@ __all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding'] class Conv2D(layers.Layer): def __init__(self, + name_scope, num_channels, num_filters, filter_size, @@ -38,19 +39,17 @@ class Conv2D(layers.Layer): act=None, param_attr=None, bias_attr=None, - name=None, dtype=core.VarDesc.VarType.FP32): assert param_attr is not False, "param_attr should not be False here." - super(Conv2D, self).__init__(name=name, dtype=dtype) + super(Conv2D, self).__init__(name_scope, dtype=dtype) # TODO(minqiyang): Move this to the top. 
from ..layer_helper import LayerHelper self._helper = LayerHelper( - type(self).__name__, + self.full_name(), param_attr=param_attr, bias_attr=bias_attr, dtype=dtype, - name=name, act=act) self._groups = groups @@ -143,6 +142,7 @@ class Conv2D(layers.Layer): class Pool2D(layers.Layer): def __init__(self, + name_scope, pool_size=-1, pool_type="max", pool_stride=1, @@ -151,7 +151,6 @@ class Pool2D(layers.Layer): use_cudnn=True, ceil_mode=False, exclusive=True, - name=None, dtype=core.VarDesc.VarType.FP32): if pool_type not in ["max", "avg"]: raise ValueError( @@ -166,10 +165,10 @@ class Pool2D(layers.Layer): if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") - super(Pool2D, self).__init__(name=name, dtype=dtype) + super(Pool2D, self).__init__(name_scope, dtype=dtype) from ..layer_helper import LayerHelper - self._helper = LayerHelper(type(self).__name__, dtype=dtype, name=name) + self._helper = LayerHelper(self.full_name(), dtype=dtype) self._pool_type = pool_type self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') @@ -205,25 +204,24 @@ class Pool2D(layers.Layer): class FC(layers.Layer): def __init__(self, + name_scope, size, param_attr=None, bias_attr=None, num_flatten_dims=1, dtype=core.VarDesc.VarType.FP32, - act=None, - name=None): - super(FC, self).__init__() + act=None): + super(FC, self).__init__(name_scope) self._size = size self._num_flatten_dims = num_flatten_dims self._dtype = dtype from ..layer_helper import LayerHelper self._helper = LayerHelper( - 'FC', + self.full_name(), param_attr=param_attr, bias_attr=bias_attr, - act=act, - name=name) + act=act) def _build_once(self, input): input_shape = input.shape @@ -282,6 +280,7 @@ class FC(layers.Layer): class BatchNorm(layers.Layer): def __init__(self, + name_scope, num_channels, act=None, is_test=False, @@ -292,22 +291,20 @@ class BatchNorm(layers.Layer): dtype=core.VarDesc.VarType.FP32, data_layout='NCHW', in_place=False, - name=None, moving_mean_name=None, moving_variance_name=None, do_model_average_for_mean_and_var=False, fuse_with_relu=False, use_global_stats=False): - super(BatchNorm, self).__init__() + super(BatchNorm, self).__init__(name_scope) assert bias_attr is not False, "bias_attr should not be False in batch_norm." from ..layer_helper import LayerHelper self._helper = LayerHelper( - 'batch_norm', + self.full_name(), param_attr=param_attr, bias_attr=bias_attr, - name=name, act=act) if dtype == core.VarDesc.VarType.FP16: @@ -419,6 +416,7 @@ class Embedding(layers.Layer): constructor. Args: + name_scope: See base class. size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size of the dictionary of embeddings and the size of each embedding vector respectively. 
@@ -446,6 +444,7 @@ class Embedding(layers.Layer): """ def __init__(self, + name_scope, size, is_sparse=False, is_distributed=False, @@ -453,7 +452,7 @@ class Embedding(layers.Layer): param_attr=None, dtype='float32'): - super(Embedding, self).__init__() + super(Embedding, self).__init__(name_scope) self._size = size self._is_sparse = is_sparse self._is_distributed = is_distributed @@ -468,7 +467,7 @@ class Embedding(layers.Layer): assert self._is_sparse is True and self._is_distributed is False from ..layer_helper import LayerHelper - self._helper = LayerHelper('embedding', param_attr=param_attr) + self._helper = LayerHelper(self.full_name(), param_attr=param_attr) self._w = self._helper.create_parameter( attr=self._param_attr, shape=self._size, diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 7d1636774c..65864ca7e0 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -34,6 +34,9 @@ class LayerHelper(object): self.kwargs = kwargs self.layer_type = layer_type name = self.kwargs.get('name', None) + # TODO(panyx0718, minqiyang): imperative mode + # can not use both `layer_type` and `name`. Deprecate LayerHelper + # and write a Helper for imperative mode. if name is None: self.kwargs['name'] = unique_name.generate(self.layer_type) diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index bf00698d63..caf9750e58 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -20,10 +20,10 @@ from paddle.fluid.layer_helper import LayerHelper class L1(fluid.imperative.Layer): - def __init__(self): - super(L1, self).__init__() + def __init__(self, prefix): + super(L1, self).__init__(prefix) self._helper = LayerHelper( - 'MyLayer', + self.full_name(), param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.1))) @@ -43,20 +43,20 @@ class L1(fluid.imperative.Layer): class L2(fluid.imperative.Layer): - def __init__(self): - super(L2, self).__init__() - self.layer1 = L1() - self.layer2 = L1() + def __init__(self, prefix): + super(L2, self).__init__(prefix) + self.layer1 = L1(self.full_name()) + self.layer2 = L1(self.full_name()) def forward(self): return self.layer1() + self.layer2() class L3(fluid.imperative.Layer): - def __init__(self): - super(L3, self).__init__() - self.layer1 = L2() - self.layer2 = L2() + def __init__(self, prefix): + super(L3, self).__init__(prefix) + self.layer1 = L2(self.full_name()) + self.layer2 = L2(self.full_name()) def forward(self): return self.layer1() + self.layer2() @@ -65,16 +65,23 @@ class L3(fluid.imperative.Layer): class TestBaseLayer(unittest.TestCase): def test_one_level(self): with fluid.imperative.guard(): - l = L1() + l = L1('test_one_level') ret = l() - self.assertEqual(l.w1.name, "MyLayer_0.w_0") - self.assertEqual(l.w2.name, "MyLayer_0.w_1") + self.assertEqual(l.w1.name, "test_one_level/L1_0_0.w_0") + self.assertEqual(l.w2.name, "test_one_level/L1_0_0.w_1") self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2]))) def test_three_level(self): with fluid.imperative.guard(): - l = L3() + l = L3('test_three_level') + names = [p.name for p in l.parameters()] ret = l() + self.assertEqual(names[0], "test_three_level/L3_0/L2_0/L1_0_0.w_0") + self.assertEqual(names[1], "test_three_level/L3_0/L2_0/L1_0_0.w_1") + self.assertEqual(names[2], "test_three_level/L3_0/L2_0/L1_1_0.w_0") + self.assertEqual(names[3], 
"test_three_level/L3_0/L2_0/L1_1_0.w_1") + self.assertEqual(names[4], "test_three_level/L3_0/L2_1/L1_0_0.w_0") + self.assertEqual(names[5], "test_three_level/L3_0/L2_1/L1_0_0.w_1") self.assertTrue(np.allclose(ret._numpy(), 0.8 * np.ones([2, 2]))) diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index c54e998ea8..dae0c466ee 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -15,7 +15,6 @@ import contextlib import unittest import numpy as np -import sys import paddle.fluid as fluid from paddle.fluid import core @@ -24,8 +23,8 @@ from test_imperative_base import new_program_scope class MyLayer(fluid.imperative.Layer): - def __init__(self): - super(MyLayer, self).__init__() + def __init__(self, name_scope): + super(MyLayer, self).__init__(name_scope) def forward(self, inputs): x = fluid.layers.relu(inputs) @@ -50,12 +49,14 @@ class MyPyLayer(fluid.imperative.PyLayer): class MLP(fluid.imperative.Layer): - def __init__(self): - super(MLP, self).__init__() - self._fc1 = FC(3, + def __init__(self, name_scope): + super(MLP, self).__init__(name_scope) + self._fc1 = FC(self.full_name(), + 3, fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.1))) - self._fc2 = FC(4, + self._fc2 = FC(self.full_name(), + 4, fluid.ParamAttr( initializer=fluid.initializer.Constant(value=0.1))) @@ -67,8 +68,9 @@ class MLP(fluid.imperative.Layer): class SimpleRNNCell(fluid.imperative.Layer): - def __init__(self, step_input_size, hidden_size, output_size, param_attr): - super(SimpleRNNCell, self).__init__() + def __init__(self, name_scope, step_input_size, hidden_size, output_size, + param_attr): + super(SimpleRNNCell, self).__init__(name_scope) self.step_input_size = step_input_size self.hidden_size = hidden_size self.output_size = output_size @@ -158,10 +160,11 @@ class SimpleRNNCell(fluid.imperative.Layer): class SimpleRNN(fluid.imperative.Layer): - def __init__(self): - super(SimpleRNN, self).__init__() + def __init__(self, name_scope): + super(SimpleRNN, self).__init__(name_scope) self.seq_len = 4 self._cell = SimpleRNNCell( + self.full_name(), 3, 3, 3, @@ -205,7 +208,7 @@ class TestImperative(unittest.TestCase): with fluid.imperative.guard(): cl = core.Layer() cl.forward([]) - l = fluid.imperative.Layer() + l = fluid.imperative.Layer("l") self.assertRaises(NotImplementedError, l.forward, []) def test_pylayer_func_id(self): @@ -281,7 +284,7 @@ class TestImperative(unittest.TestCase): np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32) with fluid.imperative.guard(): var_inp = fluid.imperative.base.to_variable(np_inp) - l = MyLayer() + l = MyLayer("my_layer") x = l(var_inp)[0] self.assertIsNotNone(x) dy_out = x._numpy() @@ -291,7 +294,7 @@ class TestImperative(unittest.TestCase): with new_program_scope(): inp = fluid.layers.data( name="inp", shape=[3], append_batch_size=False) - l = MyLayer() + l = MyLayer("my_layer") x = l(inp)[0] param_grads = fluid.backward.append_backward( x, parameter_list=[l._x_for_debug.name])[0] @@ -309,7 +312,7 @@ class TestImperative(unittest.TestCase): np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) with fluid.imperative.guard(): var_inp = fluid.imperative.base.to_variable(np_inp) - mlp = MLP() + mlp = MLP("mlp") out = mlp(var_inp) dy_out = out._numpy() out._backward() @@ -318,7 +321,7 @@ class TestImperative(unittest.TestCase): with new_program_scope(): inp = fluid.layers.data( name="inp", shape=[2, 2], 
append_batch_size=False) - mlp = MLP() + mlp = MLP("mlp") out = mlp(inp) param_grads = fluid.backward.append_backward( out, parameter_list=[mlp._fc1._w.name])[0] @@ -334,10 +337,10 @@ class TestImperative(unittest.TestCase): self.assertTrue(np.allclose(dy_grad, static_grad)) params = mlp.parameters(True) - self.assertEqual("FC_0.w_0", params[0].name) - self.assertEqual("FC_0.b_0", params[1].name) - self.assertEqual("FC_1.w_0", params[2].name) - self.assertEqual("FC_1.b_0", params[3].name) + self.assertEqual("mlp/MLP_0/FC_0_0.w_0", params[0].name) + self.assertEqual("mlp/MLP_0/FC_0_0.b_0", params[1].name) + self.assertEqual("mlp/MLP_0/FC_1_0.w_0", params[2].name) + self.assertEqual("mlp/MLP_0/FC_1_0.b_0", params[3].name) self.assertEqual(len(params), 4) sublayers = mlp.sublayers(True) @@ -353,7 +356,7 @@ class TestImperative(unittest.TestCase): with fluid.imperative.guard(): var_inp = fluid.imperative.base.to_variable(np_inp) var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) - simple_rnn = SimpleRNN() + simple_rnn = SimpleRNN("simple_rnn") outs, pre_hiddens = simple_rnn.forward(var_inp) dy_out = outs[3]._numpy() outs[3]._backward() @@ -364,7 +367,7 @@ class TestImperative(unittest.TestCase): with new_program_scope(): inp = fluid.layers.data( name="inp", shape=[1, 4, 3], append_batch_size=False) - simple_rnn = SimpleRNN() + simple_rnn = SimpleRNN("simple_rnn") outs, pre_hiddens = simple_rnn(inp) param_grads = fluid.backward.append_backward(outs[3]) exe = fluid.Executor(fluid.CPUPlace()) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py index 33c196d1ab..a80202d6dd 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py @@ -28,10 +28,10 @@ from paddle.fluid.imperative.base import to_variable class Discriminator(fluid.imperative.Layer): - def __init__(self): - super(Discriminator, self).__init__() - self._fc1 = FC(size=32, act='elu', name="d_fc1") - self._fc2 = FC(size=1, name="d_fc2") + def __init__(self, name_scope): + super(Discriminator, self).__init__(name_scope) + self._fc1 = FC(self.full_name(), size=32, act='elu') + self._fc2 = FC(self.full_name(), size=1) def forward(self, inputs): x = self._fc1(inputs) @@ -39,11 +39,11 @@ class Discriminator(fluid.imperative.Layer): class Generator(fluid.imperative.Layer): - def __init__(self): - super(Generator, self).__init__() - self._fc1 = FC(size=64, act='elu', name="g_fc1") - self._fc2 = FC(size=64, act='elu', name="g_fc2") - self._fc3 = FC(size=1, name="g_fc3") + def __init__(self, name_scope): + super(Generator, self).__init__(name_scope) + self._fc1 = FC(self.full_name(), size=64, act='elu') + self._fc2 = FC(self.full_name(), size=64, act='elu') + self._fc3 = FC(self.full_name(), size=1) def forward(self, inputs): x = self._fc1(inputs) @@ -65,8 +65,8 @@ class TestImperativeMnist(unittest.TestCase): scope = fluid.core.Scope() with new_program_scope( main=discriminate_p, startup=startup, scope=scope): - discriminator = Discriminator() - generator = Generator() + discriminator = Discriminator("d") + generator = Generator("g") img = fluid.layers.data( name="img", shape=[2, 1], append_batch_size=False) @@ -93,8 +93,8 @@ class TestImperativeMnist(unittest.TestCase): sgd.minimize(d_loss) with new_program_scope(main=generate_p, startup=startup, scope=scope): - discriminator = Discriminator() - generator = Generator() + discriminator = Discriminator("d") + generator = Generator("g") 
noise = fluid.layers.data( name="noise", shape=[2, 2], append_batch_size=False) @@ -134,8 +134,8 @@ class TestImperativeMnist(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - discriminator = Discriminator() - generator = Generator() + discriminator = Discriminator("d") + generator = Generator("g") sgd = SGDOptimizer(learning_rate=1e-3) d_real = discriminator(to_variable(np.ones([2, 1], np.float32))) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 08b155acc6..780c6a6be5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -28,6 +28,7 @@ from test_imperative_base import new_program_scope class SimpleImgConvPool(fluid.imperative.Layer): def __init__(self, + name_scope, num_channels, num_filters, filter_size, @@ -44,9 +45,10 @@ class SimpleImgConvPool(fluid.imperative.Layer): use_cudnn=False, param_attr=None, bias_attr=None): - super(SimpleImgConvPool, self).__init__() + super(SimpleImgConvPool, self).__init__(name_scope) self._conv2d = Conv2D( + self.full_name(), num_channels=num_channels, num_filters=num_filters, filter_size=filter_size, @@ -59,6 +61,7 @@ class SimpleImgConvPool(fluid.imperative.Layer): use_cudnn=use_cudnn) self._pool2d = Pool2D( + self.full_name(), pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, @@ -73,19 +76,20 @@ class SimpleImgConvPool(fluid.imperative.Layer): class MNIST(fluid.imperative.Layer): - def __init__(self, param_attr=None, bias_attr=None): - super(MNIST, self).__init__() + def __init__(self, name_scope, param_attr=None, bias_attr=None): + super(MNIST, self).__init__(name_scope) self._simple_img_conv_pool_1 = SimpleImgConvPool( - 1, 20, 5, 2, 2, act="relu") + self.full_name(), 1, 20, 5, 2, 2, act="relu") self._simple_img_conv_pool_2 = SimpleImgConvPool( - 20, 50, 5, 2, 2, act="relu") + self.full_name(), 20, 50, 5, 2, 2, act="relu") pool_2_shape = 50 * 4 * 4 SIZE = 10 scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5 - self._fc = FC(10, + self._fc = FC(self.full_name(), + 10, param_attr=fluid.param_attr.ParamAttr( initializer=fluid.initializer.NormalInitializer( loc=0.0, scale=scale)), @@ -106,7 +110,7 @@ class TestImperativeMnist(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - mnist = MNIST() + mnist = MNIST("mnist") sgd = SGDOptimizer(learning_rate=1e-3) train_reader = paddle.batch( paddle.dataset.mnist.train(), batch_size=128) @@ -150,7 +154,7 @@ class TestImperativeMnist(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) - mnist = MNIST() + mnist = MNIST("mnist") sgd = SGDOptimizer(learning_rate=1e-3) train_reader = paddle.batch( paddle.dataset.mnist.train(), batch_size=128) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 7cf3bf13d2..c8e42d5ede 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -28,12 +28,13 @@ from paddle.fluid.backward import append_backward class SimpleLSTMRNN(fluid.imperative.Layer): def __init__(self, + name_scope, hidden_size, num_steps, num_layers=2, init_scale=0.1, dropout=None): - super(SimpleLSTMRNN, self).__init__() 
+ super(SimpleLSTMRNN, self).__init__(name_scope) self._hidden_size = hidden_size self._num_layers = num_layers self._init_scale = init_scale @@ -130,13 +131,14 @@ class SimpleLSTMRNN(fluid.imperative.Layer): class PtbModel(fluid.imperative.Layer): def __init__(self, + name_scope, hidden_size, vocab_size, num_layers=2, num_steps=20, init_scale=0.1, dropout=None): - super(PtbModel, self).__init__() + super(PtbModel, self).__init__(name_scope) self.hidden_size = hidden_size self.vocab_size = vocab_size self.init_scale = init_scale @@ -146,12 +148,14 @@ class PtbModel(fluid.imperative.Layer): from paddle.fluid.layer_helper import LayerHelper self._helper = LayerHelper('PtbModel', act="tanh") self.simple_lstm_rnn = SimpleLSTMRNN( + self.full_name(), hidden_size, num_steps, num_layers=num_layers, init_scale=init_scale, dropout=dropout) self.embedding = Embedding( + self.full_name(), size=[vocab_size, hidden_size], dtype='float32', is_sparse=False, @@ -226,6 +230,7 @@ class TestImperativePtbRnn(unittest.TestCase): fluid.default_main_program().random_seed = seed # TODO: marsyang1993 Change seed to ptb_model = PtbModel( + "ptb_model", hidden_size=hidden_size, vocab_size=vocab_size, num_layers=num_layers, @@ -265,6 +270,7 @@ class TestImperativePtbRnn(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed ptb_model = PtbModel( + "ptb_model", hidden_size=hidden_size, vocab_size=vocab_size, num_layers=num_layers, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 128d18621d..0e134742a7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -70,15 +70,17 @@ def optimizer_setting(params): class ConvBNLayer(fluid.imperative.Layer): def __init__(self, + name_scope, num_channels, num_filters, filter_size, stride=1, groups=1, act=None): - super(ConvBNLayer, self).__init__() + super(ConvBNLayer, self).__init__(name_scope) self._conv = Conv2D( + self.full_name(), num_channels=num_channels, num_filters=num_filters, filter_size=filter_size, @@ -88,7 +90,7 @@ class ConvBNLayer(fluid.imperative.Layer): act=None, bias_attr=None) - self._batch_norm = BatchNorm(num_filters, act=act) + self._batch_norm = BatchNorm(self.full_name(), num_filters, act=act) def forward(self, inputs): y = self._conv(inputs) @@ -98,21 +100,29 @@ class ConvBNLayer(fluid.imperative.Layer): class BottleneckBlock(fluid.imperative.Layer): - def __init__(self, num_channels, num_filters, stride, shortcut=True): - super(BottleneckBlock, self).__init__() + def __init__(self, + name_scope, + num_channels, + num_filters, + stride, + shortcut=True): + super(BottleneckBlock, self).__init__(name_scope) self.conv0 = ConvBNLayer( + self.full_name(), num_channels=num_channels, num_filters=num_filters, filter_size=1, act='relu') self.conv1 = ConvBNLayer( + self.full_name(), num_channels=num_filters, num_filters=num_filters, filter_size=3, stride=stride, act='relu') self.conv2 = ConvBNLayer( + self.full_name(), num_channels=num_filters, num_filters=num_filters * 4, filter_size=1, @@ -120,6 +130,7 @@ class BottleneckBlock(fluid.imperative.Layer): if not shortcut: self.short = ConvBNLayer( + self.full_name(), num_channels=num_channels, num_filters=num_filters * 4, filter_size=1, @@ -141,13 +152,13 @@ class BottleneckBlock(fluid.imperative.Layer): y = fluid.layers.elementwise_add(x=short, y=conv2) - layer_helper = 
LayerHelper('elementwise_add_activation', act='relu') + layer_helper = LayerHelper(self.full_name(), act='relu') return layer_helper.append_activation(y) class ResNet(fluid.imperative.Layer): - def __init__(self, layers=50, class_dim=102): - super(ResNet, self).__init__() + def __init__(self, name_scope, layers=50, class_dim=102): + super(ResNet, self).__init__(name_scope) self.layers = layers supported_layers = [50, 101, 152] @@ -163,9 +174,18 @@ class ResNet(fluid.imperative.Layer): num_filters = [64, 128, 256, 512] self.conv = ConvBNLayer( - num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu') + self.full_name(), + num_channels=3, + num_filters=64, + filter_size=7, + stride=2, + act='relu') self.pool2d_max = Pool2D( - pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') + self.full_name(), + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') self.bottleneck_block_list = [] num_channels = 64 @@ -175,6 +195,7 @@ class ResNet(fluid.imperative.Layer): bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), BottleneckBlock( + self.full_name(), num_channels=num_channels, num_filters=num_filters[block], stride=2 if i == 0 and block != 0 else 1, @@ -184,12 +205,13 @@ class ResNet(fluid.imperative.Layer): shortcut = True self.pool2d_avg = Pool2D( - pool_size=7, pool_type='avg', global_pooling=True) + self.full_name(), pool_size=7, pool_type='avg', global_pooling=True) import math stdv = 1.0 / math.sqrt(2048 * 1.0) - self.out = FC(size=class_dim, + self.out = FC(self.full_name(), + size=class_dim, act='softmax', param_attr=fluid.param_attr.ParamAttr( initializer=fluid.initializer.Uniform(-stdv, stdv))) @@ -214,7 +236,7 @@ class TestImperativeResnet(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - resnet = ResNet() + resnet = ResNet("resnet") optimizer = optimizer_setting(train_parameters) np.random.seed(seed) import random @@ -275,7 +297,7 @@ class TestImperativeResnet(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) - resnet = ResNet() + resnet = ResNet("resnet") optimizer = optimizer_setting(train_parameters) np.random.seed(seed) From 5d132ecf83890be8b728b3cf17a8a533a98b98c0 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Fri, 22 Feb 2019 03:28:27 +0100 Subject: [PATCH 112/117] Auto-cmake generator, auto-fill map (#15402) test=develop --- paddle/fluid/operators/ngraph/CMakeLists.txt | 1 + .../fluid/operators/ngraph/ngraph_bridge.cc | 39 ++------- paddle/fluid/operators/ngraph/ngraph_bridge.h | 9 +- .../fluid/operators/ngraph/ngraph_engine.cc | 6 +- paddle/fluid/operators/ngraph/ngraph_ops.h | 39 --------- .../fluid/operators/ngraph/ops/CMakeLists.txt | 8 ++ .../fluid/operators/ngraph/ops/accuracy_op.h | 3 + .../operators/ngraph/ops/activation_op.h | 4 + .../operators/ngraph/ops/batch_norm_op.h | 4 + .../operators/ngraph/ops/binary_unary_op.h | 5 ++ paddle/fluid/operators/ngraph/ops/conv2d_op.h | 4 + .../operators/ngraph/ops/cross_entropy_op.h | 4 + .../operators/ngraph/ops/elementwise_add_op.h | 4 + .../operators/ngraph/ops/fill_constant_op.h | 3 + paddle/fluid/operators/ngraph/ops/mean_op.h | 4 + .../fluid/operators/ngraph/ops/momentum_op.h | 3 + paddle/fluid/operators/ngraph/ops/mul_op.h | 4 + paddle/fluid/operators/ngraph/ops/op_bridge.h | 84 +++++++++++++++++++ paddle/fluid/operators/ngraph/ops/pool2d_op.h | 4 + paddle/fluid/operators/ngraph/ops/scale_op.h | 3 + 
.../fluid/operators/ngraph/ops/softmax_op.h | 4 + paddle/fluid/operators/ngraph/ops/top_k_op.h | 3 + 22 files changed, 158 insertions(+), 84 deletions(-) delete mode 100644 paddle/fluid/operators/ngraph/ngraph_ops.h create mode 100644 paddle/fluid/operators/ngraph/ops/CMakeLists.txt create mode 100644 paddle/fluid/operators/ngraph/ops/op_bridge.h diff --git a/paddle/fluid/operators/ngraph/CMakeLists.txt b/paddle/fluid/operators/ngraph/CMakeLists.txt index 6b256ef026..7559d29ce2 100644 --- a/paddle/fluid/operators/ngraph/CMakeLists.txt +++ b/paddle/fluid/operators/ngraph/CMakeLists.txt @@ -2,4 +2,5 @@ if(WITH_NGRAPH) cc_library(ngraph_bridge SRCS ngraph_bridge.cc DEPS operator framework_proto ngraph) cc_library(ngraph_engine SRCS ngraph_engine.cc DEPS ngraph_bridge framework_proto) op_library(ngraph_engine_op DEPS ngraph_engine op_registry op_info device_context) + add_subdirectory(ops) endif() diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.cc b/paddle/fluid/operators/ngraph/ngraph_bridge.cc index 4bfcba6c3c..996376c53f 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.cc +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.cc @@ -19,50 +19,21 @@ limitations under the License. */ #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ngraph_bridge.h" #include "paddle/fluid/operators/ngraph/ngraph_ops.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { namespace operators { -namespace NG_OPS = paddle::operators::ngraphs; -std::map&, - std::shared_ptr>>)>> - NgraphBridge::NG_NODE_MAP = { - {"accuracy", NG_OPS::BuildAccuracyNode}, - {"conv2d", NG_OPS::BuildConv2dNode}, - {"conv2d_grad", NG_OPS::BuildConv2dGradNode}, - {"batch_norm", NG_OPS::BuildBatchNormNode}, - {"batch_norm_grad", NG_OPS::BuildBatchNormGradNode}, - {"cross_entropy", NG_OPS::BuildCrossEntropyNode}, - {"cross_entropy_grad", NG_OPS::BuildCrossEntropyGradNode}, - {"elementwise_add", NG_OPS::BuildElementwiseAddNode}, - {"elementwise_add_grad", NG_OPS::BuildElementwiseAddGradNode}, - {"fill_constant", NG_OPS::BuildFillConstantNode}, - {"mean", NG_OPS::BuildMeanNode}, - {"mean_grad", NG_OPS::BuildMeanGradNode}, - {"momentum", NG_OPS::BuildMomentumNode}, - {"mul", NG_OPS::BuildMulNode}, - {"mul_grad", NG_OPS::BuildMulGradNode}, - {"pool2d", NG_OPS::BuildPool2dNode}, - {"pool2d_grad", NG_OPS::BuildPool2dGradNode}, - {"softmax", NG_OPS::BuildSoftmaxNode}, - {"softmax_grad", NG_OPS::BuildSoftmaxGradNode}, - {"scale", NG_OPS::BuildScaleNode}, - {"sigmoid", NG_OPS::BuildUnaryNode}, - {"sum", NG_OPS::BuildSumNode}, - {"relu", NG_OPS::BuildUnaryNode}, - {"relu_grad", NG_OPS::BuildReluGradNode}, - {"tanh", NG_OPS::BuildUnaryNode}, - {"tanh_grad", NG_OPS::BuildTanhGradNode}, - {"top_k", NG_OPS::BuildTopKNode}}; +bool NgraphBridge::isRegister(const std::string& str) { + return ops::NgraphSingleton::Lookup(str); +} void NgraphBridge::BuildNgNode( const std::shared_ptr& op) { auto& op_type = op->Type(); - NG_NODE_MAP[op_type](op, ngb_node_map_); + ops::NgraphSingleton::BuildNode(ngb_node_map_, op, op_type); } } // namespace operators diff --git a/paddle/fluid/operators/ngraph/ngraph_bridge.h b/paddle/fluid/operators/ngraph/ngraph_bridge.h index c57988f8f6..952d5b0b43 100644 --- a/paddle/fluid/operators/ngraph/ngraph_bridge.h +++ b/paddle/fluid/operators/ngraph/ngraph_bridge.h @@ -28,13 +28,6 @@ namespace operators { class NgraphBridge { public: - static std::map< - std::string, - std::function&, - 
std::shared_ptr>>)>> - NG_NODE_MAP; - explicit NgraphBridge( std::shared_ptr< std::unordered_map>> @@ -43,6 +36,8 @@ class NgraphBridge { void BuildNgNode(const std::shared_ptr& op); + static bool isRegister(const std::string& str); + private: std::shared_ptr< std::unordered_map>> diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.cc b/paddle/fluid/operators/ngraph/ngraph_engine.cc index bec4b514a2..660a3298cb 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.cc +++ b/paddle/fluid/operators/ngraph/ngraph_engine.cc @@ -88,14 +88,12 @@ static std::vector> NgraphOpIntervals( int pivot = left; while (pivot < right) { auto op_type = ops.at(pivot)->Type(); - if (NgraphBridge::NG_NODE_MAP.find(op_type) == - NgraphBridge::NG_NODE_MAP.end()) { + if (NgraphBridge::isRegister(op_type)) { ++pivot; } else { int start = pivot, end = start; while (pivot < right && - (NgraphBridge::NG_NODE_MAP.find(ops.at(pivot)->Type()) != - NgraphBridge::NG_NODE_MAP.end())) { + (!NgraphBridge::isRegister(ops.at(pivot)->Type()))) { ++pivot; ++end; } diff --git a/paddle/fluid/operators/ngraph/ngraph_ops.h b/paddle/fluid/operators/ngraph/ngraph_ops.h deleted file mode 100644 index 8edb4dd2a1..0000000000 --- a/paddle/fluid/operators/ngraph/ngraph_ops.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -/* - * This file contains the list of the ngraph operators for Paddle. - * - * ATTENTION: It requires some C++11 features, for lower version C++ or C, we - * might release another API. - */ - -#pragma once - -#include "ops/accuracy_op.h" -#include "ops/activation_op.h" -#include "ops/batch_norm_op.h" -#include "ops/binary_unary_op.h" -#include "ops/conv2d_op.h" -#include "ops/cross_entropy_op.h" -#include "ops/elementwise_add_op.h" -#include "ops/fill_constant_op.h" -#include "ops/mean_op.h" -#include "ops/momentum_op.h" -#include "ops/mul_op.h" -#include "ops/pool2d_op.h" -#include "ops/scale_op.h" -#include "ops/softmax_op.h" -#include "ops/sum_op.h" -#include "ops/top_k_op.h" diff --git a/paddle/fluid/operators/ngraph/ops/CMakeLists.txt b/paddle/fluid/operators/ngraph/ops/CMakeLists.txt new file mode 100644 index 0000000000..7dee3308b7 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/CMakeLists.txt @@ -0,0 +1,8 @@ +file(GLOB LIST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*.h") +set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/operators/ngraph/ngraph_ops.h) +file(APPEND ${pass_file} "\#pragma once\n") +file(WRITE ${pass_file} "// Generated by the /paddle/fluid/operators/ngraph/ops/CMakeLists.txt. 
DO NOT EDIT!\n\n") + +foreach(OPS_NAME ${LIST_OPS}) + file(APPEND ${pass_file} "\#include \"paddle/fluid/operators/ngraph/ops/${OPS_NAME}\"\n") +endforeach(OPS_NAME) diff --git a/paddle/fluid/operators/ngraph/ops/accuracy_op.h b/paddle/fluid/operators/ngraph/ops/accuracy_op.h index bf37ce48d8..d90ec97298 100644 --- a/paddle/fluid/operators/ngraph/ops/accuracy_op.h +++ b/paddle/fluid/operators/ngraph/ops/accuracy_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -63,3 +64,5 @@ void BuildAccuracyNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(accuracy, BuildAccuracyNode); diff --git a/paddle/fluid/operators/ngraph/ops/activation_op.h b/paddle/fluid/operators/ngraph/ops/activation_op.h index f66080e3aa..d1b0b80d22 100644 --- a/paddle/fluid/operators/ngraph/ops/activation_op.h +++ b/paddle/fluid/operators/ngraph/ops/activation_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -50,3 +51,6 @@ void BuildTanhGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(relu_grad, BuildReluGradNode); +REGISTER_NG_OP(tanh_grad, BuildTanhGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/batch_norm_op.h b/paddle/fluid/operators/ngraph/ops/batch_norm_op.h index f0d2d5f27f..2d638bb53f 100644 --- a/paddle/fluid/operators/ngraph/ops/batch_norm_op.h +++ b/paddle/fluid/operators/ngraph/ops/batch_norm_op.h @@ -20,6 +20,7 @@ limitations under the License. */ #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" #include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -155,3 +156,6 @@ void BuildBatchNormGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(batch_norm, BuildBatchNormNode); +REGISTER_NG_OP(batch_norm_grad, BuildBatchNormGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/binary_unary_op.h b/paddle/fluid/operators/ngraph/ops/binary_unary_op.h index 0c0d25d0cd..375f188286 100644 --- a/paddle/fluid/operators/ngraph/ops/binary_unary_op.h +++ b/paddle/fluid/operators/ngraph/ops/binary_unary_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -47,3 +48,7 @@ static void BuildUnaryNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(relu, BuildUnaryNode); +REGISTER_NG_OP(tanh, BuildUnaryNode); +REGISTER_NG_OP(sigmoid, BuildUnaryNode); diff --git a/paddle/fluid/operators/ngraph/ops/conv2d_op.h b/paddle/fluid/operators/ngraph/ops/conv2d_op.h index 46fb2703f5..d664825c53 100644 --- a/paddle/fluid/operators/ngraph/ops/conv2d_op.h +++ b/paddle/fluid/operators/ngraph/ops/conv2d_op.h @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -233,3 +234,6 @@ void BuildConv2dGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(conv2d, BuildConv2dNode); +REGISTER_NG_OP(conv2d_grad, BuildConv2dGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h index f88a2cb941..3ab158f3e1 100644 --- a/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h +++ b/paddle/fluid/operators/ngraph/ops/cross_entropy_op.h @@ -18,6 +18,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -143,3 +144,6 @@ void BuildCrossEntropyGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(cross_entropy, BuildCrossEntropyNode); +REGISTER_NG_OP(cross_entropy_grad, BuildCrossEntropyGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h b/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h index 868df51e16..fb796c336a 100644 --- a/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h +++ b/paddle/fluid/operators/ngraph/ops/elementwise_add_op.h @@ -19,6 +19,7 @@ limitations under the License. */ #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ops/elementwise_node.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -85,3 +86,6 @@ void BuildElementwiseAddGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(elementwise_add, BuildElementwiseAddNode); +REGISTER_NG_OP(elementwise_add_grad, BuildElementwiseAddGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h index 58783bc220..bc958f2ba2 100644 --- a/paddle/fluid/operators/ngraph/ops/fill_constant_op.h +++ b/paddle/fluid/operators/ngraph/ops/fill_constant_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -55,3 +56,5 @@ void BuildFillConstantNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(fill_constant, BuildFillConstantNode); diff --git a/paddle/fluid/operators/ngraph/ops/mean_op.h b/paddle/fluid/operators/ngraph/ops/mean_op.h index 4c44bc4c11..f839d9978d 100644 --- a/paddle/fluid/operators/ngraph/ops/mean_op.h +++ b/paddle/fluid/operators/ngraph/ops/mean_op.h @@ -19,6 +19,7 @@ limitations under the License. 
*/ #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -64,3 +65,6 @@ void BuildMeanGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(mean, BuildMeanNode); +REGISTER_NG_OP(mean_grad, BuildMeanGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/momentum_op.h b/paddle/fluid/operators/ngraph/ops/momentum_op.h index f1b365c488..b8291a08a2 100644 --- a/paddle/fluid/operators/ngraph/ops/momentum_op.h +++ b/paddle/fluid/operators/ngraph/ops/momentum_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -99,3 +100,5 @@ void BuildMomentumNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(momentum, BuildMomentumNode); diff --git a/paddle/fluid/operators/ngraph/ops/mul_op.h b/paddle/fluid/operators/ngraph/ops/mul_op.h index 4a6cbebe24..98c70a1a99 100644 --- a/paddle/fluid/operators/ngraph/ops/mul_op.h +++ b/paddle/fluid/operators/ngraph/ops/mul_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -130,3 +131,6 @@ static void BuildMulGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(mul, BuildMulNode); +REGISTER_NG_OP(mul_grad, BuildMulGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/op_bridge.h b/paddle/fluid/operators/ngraph/ops/op_bridge.h new file mode 100644 index 0000000000..93df0ad806 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/op_bridge.h @@ -0,0 +1,84 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once +#include +#include +#include +#include + +#include "ngraph/node.hpp" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/ngraph/ngraph_bridge.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace ops { + +class NgraphSingleton { + NgraphSingleton() = default; + NgraphSingleton(NgraphSingleton const&) = delete; + void operator=(NgraphSingleton const) = delete; + + ~NgraphSingleton() = default; + + static std::map< + std::string, + std::function&, + std::shared_ptr>>)>> + ng_node_maps_; + + public: + template + static void Register(TF&& tf, const std::string& name) { + ng_node_maps_[name] = tf; + } + + static bool Lookup(const std::string& name) { + auto it = ng_node_maps_.find(name); + if (it == ng_node_maps_.end()) { + return true; + } + return false; + } + + static void BuildNode( + const std::shared_ptr>>& ng_maps, + const std::shared_ptr& op, + const std::string& name) { + ng_node_maps_[name](op, ng_maps); + } +}; + +std::map&, + std::shared_ptr>>)>> + NgraphSingleton::ng_node_maps_; + +} // namespace ops +} // namespace operators +} // namespace paddle + +#define REGISTER_NG_OP(op_type__, Converter__) \ + struct ng_##op_type__##_converter { \ + ng_##op_type__##_converter() { \ + paddle::operators::ops::NgraphSingleton::Register( \ + paddle::operators::ngraphs::Converter__, #op_type__); \ + } \ + }; \ + ng_##op_type__##_converter ng_##op_type__##_converter__; diff --git a/paddle/fluid/operators/ngraph/ops/pool2d_op.h b/paddle/fluid/operators/ngraph/ops/pool2d_op.h index 836c9d6c18..a6371372ef 100644 --- a/paddle/fluid/operators/ngraph/ops/pool2d_op.h +++ b/paddle/fluid/operators/ngraph/ops/pool2d_op.h @@ -18,6 +18,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -172,3 +173,6 @@ void BuildPool2dGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(pool2d, BuildPool2dNode); +REGISTER_NG_OP(pool2d_grad, BuildPool2dGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/scale_op.h b/paddle/fluid/operators/ngraph/ops/scale_op.h index 91a57d0be6..a334192419 100644 --- a/paddle/fluid/operators/ngraph/ops/scale_op.h +++ b/paddle/fluid/operators/ngraph/ops/scale_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -37,3 +38,5 @@ void BuildScaleNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(scale, BuildScaleNode); diff --git a/paddle/fluid/operators/ngraph/ops/softmax_op.h b/paddle/fluid/operators/ngraph/ops/softmax_op.h index fc6395c08b..1df6418de0 100644 --- a/paddle/fluid/operators/ngraph/ops/softmax_op.h +++ b/paddle/fluid/operators/ngraph/ops/softmax_op.h @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include "ngraph/ngraph.hpp" #include "paddle/fluid/operators/ngraph/ops/elementwise_scalar_op.h" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -72,3 +73,6 @@ void BuildSoftmaxGradNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(softmax, BuildSoftmaxNode); +REGISTER_NG_OP(softmax_grad, BuildSoftmaxGradNode); diff --git a/paddle/fluid/operators/ngraph/ops/top_k_op.h b/paddle/fluid/operators/ngraph/ops/top_k_op.h index 852ecd7139..6d10faa7c2 100644 --- a/paddle/fluid/operators/ngraph/ops/top_k_op.h +++ b/paddle/fluid/operators/ngraph/ops/top_k_op.h @@ -16,6 +16,7 @@ limitations under the License. */ #include #include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" #include "paddle/fluid/platform/ngraph_helper.h" namespace paddle { @@ -42,3 +43,5 @@ void BuildTopKNode( } // namespace ngraphs } // namespace operators } // namespace paddle + +REGISTER_NG_OP(top_k, BuildTopKNode); From c4faf36e7a588098c2dfbe6e83c5df21ae8b9ab5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Gallus?= Date: Fri, 22 Feb 2019 04:17:15 +0100 Subject: [PATCH 113/117] MKL-DNN: Add test for conv bias fuse pass (#15824) * MKL-DNN: Add test for conv bias fuse pass test=develop * Remove const cast from Conv Bias Pass Test * Add conv with bias test case for conv+bias fuse ut test=develop --- paddle/fluid/framework/ir/CMakeLists.txt | 1 + .../conv_bias_mkldnn_fuse_pass_tester.cc | 151 ++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 07c2c970d4..25d9afbcc8 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -102,6 +102,7 @@ cc_test(test_seqpool_concat_fuse_pass SRCS seqpool_concat_fuse_pass_tester.cc DE cc_test(test_is_test_pass SRCS is_test_pass_tester.cc DEPS is_test_pass) if (WITH_MKLDNN) cc_test(test_depthwise_conv_mkldnn_pass SRCS mkldnn/depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass) + cc_test(test_conv_bias_mkldnn_fuse_pass SRCS mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc DEPS conv_bias_mkldnn_fuse_pass naive_executor) cc_test(test_conv_relu_mkldnn_fuse_pass SRCS mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) endif () diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc new file mode 100644 index 0000000000..38b7fe5203 --- /dev/null +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc @@ -0,0 +1,151 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/naive_executor.h" +#include "paddle/fluid/platform/place.h" + +#include +#include "paddle/fluid/framework/op_proto_maker.h" + +namespace paddle { +namespace framework { +namespace ir { + +void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name, + const std::vector& inputs, + const std::vector& outputs) { + auto* op = prog->MutableBlock(0)->AppendOp(); + op->SetType(type); + if (type == "conv2d") { + op->SetAttr("use_mkldnn", true); + op->SetAttr("name", name); + op->SetInput("Input", {inputs[0]}); + op->SetInput("Filter", {inputs[1]}); + if (inputs.size() > 2) + op->SetInput("Bias", {inputs[2]}); + else + op->SetInput("Bias", {}); + } else if (type == "elementwise_add") { + op->SetAttr("use_mkldnn", true); + op->SetInput("X", {inputs[0]}); + op->SetInput("Y", {inputs[1]}); + } + op->SetOutput("Out", outputs); + op->SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(), + static_cast(OpRole::kForward)); +} + +// (c, weights)->conv->f +// (f)->elementwise_add->g +ProgramDesc BuildProgramDesc(bool convWithExistingBias) { + ProgramDesc prog; + std::vector nodes{"c", "weights", "f", "eltwise_bias", "g"}; + if (convWithExistingBias) nodes.push_back("conv_bias"); + for (auto& v : nodes) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + if (v == "weights" || v == "conv_bias" || v == "eltwise_bias") { + var->SetPersistable(true); + } + } + + // conv+bias, both with MKL-DNN + if (convWithExistingBias) { + SetOp(&prog, "conv2d", "conv", + std::vector({"c", "weights", "conv_bias"}), + std::vector({"f"})); + } else { + SetOp(&prog, "conv2d", "conv", std::vector({"c", "weights"}), + std::vector({"f"})); + } + SetOp(&prog, "elementwise_add", "eltwise", + std::vector({"f", "eltwise_bias"}), + std::vector({"g"})); + + return prog; +} + +void InitTensorHolder(Scope* scope, const paddle::platform::Place& place, + const char* var_name) { + auto x = scope->Var(var_name); + auto tensor = x->GetMutable(); + tensor->mutable_data(place, proto::VarType::FP32, + ::paddle::memory::Allocator::kDefault, 1); +} + +void MainTest(bool convWithExistingBias) { + auto prog = BuildProgramDesc(convWithExistingBias); + std::unique_ptr graph(new ir::Graph(prog)); + auto place = paddle::platform::CPUPlace(); + NaiveExecutor exe{place}; + Scope scope; + // Init scope, as it is used in pass + exe.CreateVariables(prog, 0, true, &scope); + if (convWithExistingBias) { + InitTensorHolder(&scope, place, "conv_bias"); + InitTensorHolder(&scope, place, "eltwise_bias"); + } + graph->Set(kParamScopeAttr, new framework::Scope*(&scope)); + + auto pass = PassRegistry::Instance().Get("conv_bias_mkldnn_fuse_pass"); + + int original_nodes_num = graph->Nodes().size(); + + graph = pass->Apply(std::move(graph)); + + int current_nodes_num = graph->Nodes().size(); + + // Remove 3 Nodes: Conv, Bias, conv_out + // Add 1 Node: ConvBias + EXPECT_EQ(original_nodes_num - 2, current_nodes_num); + + // Assert conv_bias op in newly generated graph + int conv_bias_count = 0; + + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == "conv2d") { + auto* op = node->Op(); + ASSERT_TRUE(op->HasAttr("use_mkldnn")); + EXPECT_TRUE(boost::get(op->GetAttr("use_mkldnn"))); + // check if "conv" convolution is fused + auto op_name = boost::get(op->GetAttr("name")); + if 
(op_name == "conv") { + auto input_names = op->InputNames(); + ASSERT_TRUE(std::find(input_names.begin(), input_names.end(), "Bias") != + input_names.end()); + auto bias = boost::get>(op->Input("Bias")); + if (bias.size()) { + ++conv_bias_count; + } + } + } + } + EXPECT_EQ(conv_bias_count, 1); +} + +TEST(ConvBiasFusePass, bias_free_conv) { MainTest(false); } + +TEST(ConvBiasFusePass, conv_with_existing_bias) { MainTest(true); } + +TEST(ConvBiasFusePass, conv3d) { + Conv3DBiasFusePass pass; + ASSERT_TRUE(pass.is_conv3d()); +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +USE_PASS(conv_bias_mkldnn_fuse_pass); From 676995c86cb4b49f9a41c7a32c5e054b16201753 Mon Sep 17 00:00:00 2001 From: Yihua Xu Date: Fri, 22 Feb 2019 11:36:19 +0800 Subject: [PATCH 114/117] Optimze Gelu with MKL Erf function (#15770) * Optimize for gelu operator * Set up the low accuracy mode of MKL ERF function. test=develop * Only enable MKLML ERF when OS is linux * Use the speical mklml version included vmsErf function to verify gelu mkl kernel. test=develop * Add the CUDA macro to avoid NVCC's compile issue. test=develop * Add the TODO comments for mklml library modification. test=develop * Clean Code test=develop * Add the comment of marco for NVCC compiler. test=develop --- cmake/external/mklml.cmake | 6 ++++-- paddle/fluid/operators/activation_op.h | 22 ++++++++++++++++++++++ paddle/fluid/operators/math/blas.h | 8 ++++++++ paddle/fluid/operators/math/blas_impl.h | 23 +++++++++++++++++++++++ paddle/fluid/platform/dynload/mklml.h | 2 ++ 5 files changed, 59 insertions(+), 2 deletions(-) diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 54826cedb8..32a9368a9f 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -39,8 +39,10 @@ IF(WIN32) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.lib) SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/mklml.dll) SET(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll) -ELSE() - SET(MKLML_VER "mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE) +ELSE() + #TODO(intel-huying): + # Now enable Erf function in mklml library temporarily, it will be updated as offical version later. + SET(MKLML_VER "VsErf_mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE) SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE) SET(MKLML_LIB ${MKLML_LIB_DIR}/libmklml_intel.so) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so) diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index c7df3ea58a..e8f5530b78 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -11,6 +11,7 @@ limitations under the License. */ #pragma once #include +#include #include #include #include @@ -24,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/float16.h" #ifdef PADDLE_WITH_MKLDNN @@ -301,8 +303,28 @@ template struct GeluFunctor : public BaseActivationFunctor { template void operator()(Device d, X x, Out out) const { +// Because the execute or device context can not be deliver here, it keep the +// marco for NVCC. 
+#if defined(PADDLE_WITH_MKLML) && !defined(_WIN32) && !defined(__APPLE__) && \ + !defined(__OSX__) && !defined(PADDLE_WITH_CUDA) + auto x_data = x.data(); + auto out_data = out.data(); + int n = std::min(x.size(), out.size()); + + std::memset(out_data, 0, n * sizeof(T)); + math::CBlas::AXPY(n, static_cast(M_SQRT1_2), x_data, 1, out_data, 1); + math::CBlas::VMERF(n, out_data, out_data, VML_LA); + for (int i = 0; i < n; i++) { + out_data[i] += static_cast(1); + } + math::CBlas::VMUL(n, x_data, out_data, out_data); + for (int i = 0; i < n; i++) { + out_data[i] *= static_cast(0.5); + } +#else auto temp = (x * static_cast(M_SQRT1_2)).erf(); out.device(d) = x * static_cast(0.5) * (static_cast(1) + temp); +#endif } }; diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h index f67f57827b..ce8109f64d 100644 --- a/paddle/fluid/operators/math/blas.h +++ b/paddle/fluid/operators/math/blas.h @@ -184,6 +184,9 @@ class Blas { template void VINV(int n, const T* a, T* y) const; + template + void VMERF(int n, const T* a, T* y, int64_t mode) const; + private: const DeviceContext& context_; }; @@ -290,6 +293,11 @@ class BlasT : private Blas { Base()->template VINV(args...); } + template + void VMERF(ARGS... args) const { + Base()->template VMERF(args...); + } + private: const Blas* Base() const { return static_cast*>(this); diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h index 972366bc09..ba995dabec 100644 --- a/paddle/fluid/operators/math/blas_impl.h +++ b/paddle/fluid/operators/math/blas_impl.h @@ -123,6 +123,11 @@ struct CBlas { static void VINV(ARGS... args) { platform::dynload::vsInv(args...); } + + template + static void VMERF(ARGS... args) { + platform::dynload::vmsErf(args...); + } }; template <> @@ -223,6 +228,11 @@ struct CBlas { static void VINV(ARGS... args) { platform::dynload::vdInv(args...); } + + template + static void VMERF(ARGS... args) { + platform::dynload::vmdErf(args...); + } }; #else @@ -625,6 +635,19 @@ void Blas::VINV(int n, const T *a, T *y) const { #endif } +template <> +template +void Blas::VMERF(int n, const T *a, T *y, + int64_t mode) const { +#ifdef PADDLE_WITH_MKLML + CBlas::VMERF(n, a, y, mode); +#else + for (int i = 0; i < n; ++i) { + y[i] = std::erf(a[i]); + } +#endif +} + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h index a260cda491..a5b846f500 100644 --- a/paddle/fluid/platform/dynload/mklml.h +++ b/paddle/fluid/platform/dynload/mklml.h @@ -86,6 +86,8 @@ extern void* mklml_dso_handle; __macro(vdPowx); \ __macro(vsInv); \ __macro(vdInv); \ + __macro(vmsErf); \ + __macro(vmdErf); \ __macro(MKL_Set_Num_Threads) MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP); From 7d96c74ab2c2c2c017499f2469a69457ba66f511 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Fri, 22 Feb 2019 11:55:08 +0800 Subject: [PATCH 115/117] Initialize the benchmark tester for operator. (#15772) * Initialize the benchmark tester for operator. test=develop * Rearrange the codes. 
test=develop --- paddle/fluid/operators/CMakeLists.txt | 1 + .../fluid/operators/benchmark/CMakeLists.txt | 3 + paddle/fluid/operators/benchmark/op_tester.cc | 303 ++++++++++++++++++ paddle/fluid/operators/benchmark/op_tester.h | 69 ++++ .../operators/benchmark/op_tester_config.cc | 114 +++++++ .../operators/benchmark/op_tester_config.h | 51 +++ paddle/fluid/operators/jit/test.cc | 26 +- 7 files changed, 554 insertions(+), 13 deletions(-) create mode 100644 paddle/fluid/operators/benchmark/CMakeLists.txt create mode 100644 paddle/fluid/operators/benchmark/op_tester.cc create mode 100644 paddle/fluid/operators/benchmark/op_tester.h create mode 100644 paddle/fluid/operators/benchmark/op_tester_config.cc create mode 100644 paddle/fluid/operators/benchmark/op_tester_config.h diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index e099425b94..2166b8b545 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -97,3 +97,4 @@ if (WITH_PYTHON) endif() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") +add_subdirectory(benchmark) diff --git a/paddle/fluid/operators/benchmark/CMakeLists.txt b/paddle/fluid/operators/benchmark/CMakeLists.txt new file mode 100644 index 0000000000..54008336a9 --- /dev/null +++ b/paddle/fluid/operators/benchmark/CMakeLists.txt @@ -0,0 +1,3 @@ +cc_test(op_tester SRCS op_tester.cc op_tester_config.cc + DEPS memory timer framework_proto proto_desc lod_tensor op_registry + device_context scope ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS}) diff --git a/paddle/fluid/operators/benchmark/op_tester.cc b/paddle/fluid/operators/benchmark/op_tester.cc new file mode 100644 index 0000000000..e179de56cd --- /dev/null +++ b/paddle/fluid/operators/benchmark/op_tester.cc @@ -0,0 +1,303 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/benchmark/op_tester.h" +#include "gflags/gflags.h" +#include "gtest/gtest.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/variable_helper.h" +#include "paddle/fluid/platform/init.h" +#include "paddle/fluid/platform/profiler.h" +#include "paddle/fluid/platform/timer.h" +#include "paddle/fluid/pybind/pybind.h" + +namespace paddle { +namespace operators { +namespace benchmark { + +DEFINE_string(op_config_list, "", "Path of op config file."); + +void OpTester::Init(const std::string &filename) { + Init(OpTesterConfig(filename)); +} + +void OpTester::Init(const OpTesterConfig &config) { + config_ = config; + + auto &op_desc_info = framework::OpInfoMap::Instance(); + // Initialize the OpDesc + if (op_desc_info.Has(config_.op_type)) { + type_ = config_.op_type; + op_desc_.SetType(config_.op_type); + + CreateInputVarDesc(); + CreateOutputVarDesc(); + } else { + LOG(FATAL) << "Op \"" << config_.op_type << "\" is not registered."; + } + + if (config_.device_id >= 0) { + place_ = paddle::platform::CUDAPlace(config_.device_id); + } else { + place_ = paddle::platform::CPUPlace(); + } + + framework::InitDevices(false); + scope_.reset(new paddle::framework::Scope()); + + op_ = framework::OpRegistry::CreateOp(op_desc_); + CreateVariables(scope_.get()); +} + +void OpTester::Run() { + if (config_.print_debug_string) { + LOG(INFO) << DebugString(); + } + + // Warm up + RunImpl(); + + platform::Timer timer; + if (config_.profile) { + if (platform::is_cpu_place(place_)) { + platform::EnableProfiler(platform::ProfilerState::kCPU); + } else { +#ifdef PADDLE_WITH_CUDA + platform::EnableProfiler(platform::ProfilerState::kAll); + platform::SetDeviceId(config_.device_id); +#else + PADDLE_THROW("'CUDAPlace' is not supported in CPU only device."); +#endif + } + + timer.Start(); + for (int i = config_.repeat; i > 0; --i) { + RunImpl(); + } + timer.Pause(); + platform::DisableProfiler(platform::EventSortingKey::kDefault, + "op_tester_profiler"); + } else { + timer.Start(); + for (int i = config_.repeat; i > 0; --i) { + RunImpl(); + } + timer.Pause(); + } + config_.runtime = timer.ElapsedMS() / config_.repeat; + LOG(INFO) << "=== Run " << config_.repeat + << " times, latency: " << config_.runtime << " ms ==="; +} + +void OpTester::RunImpl() { + op_->Run(*scope_, place_); + platform::DeviceContextPool::Instance().Get(place_)->Wait(); + scope_->DropKids(); +} + +std::vector OpTester::GetOpProtoInputNames() { + std::vector input_names; + const framework::proto::OpProto &proto = + framework::OpInfoMap::Instance().Get(type_).Proto(); + for (int i = 0; i != proto.inputs_size(); ++i) { + const auto &input = proto.inputs(i); + input_names.push_back(input.name()); + } + return input_names; +} + +std::vector OpTester::GetOpProtoOutputNames() { + std::vector output_names; + const framework::proto::OpProto &proto = + framework::OpInfoMap::Instance().Get(type_).Proto(); + for (int i = 0; i != proto.outputs_size(); ++i) { + const auto &output = proto.outputs(i); + output_names.push_back(output.name()); + } + return output_names; +} + +void OpTester::CreateInputVarDesc() { + std::vector input_names = GetOpProtoInputNames(); + for (auto &name : input_names) { + const OpInputConfig *input = config_.GetInput(name); + if (input == nullptr) { + LOG(FATAL) << "The input " << name << " of op " << config_.op_type + << " is not correctlly provided."; + } + + std::string var_name = config_.op_type + "." 
+ name; + framework::VarDesc *var = Var(var_name); + // Need to support more type + var->SetType(framework::proto::VarType::LOD_TENSOR); + var->SetPersistable(false); + var->SetDataType(framework::proto::VarType::FP32); + var->SetShape(input->dims); + + op_desc_.SetInput(name, {var_name}); + inputs_.push_back(var_name); + } +} + +void OpTester::CreateOutputVarDesc() { + std::vector output_names = GetOpProtoOutputNames(); + for (auto &name : output_names) { + std::string var_name = config_.op_type + "." + name; + framework::VarDesc *var = Var(var_name); + // Need to support more type + var->SetType(framework::proto::VarType::LOD_TENSOR); + var->SetPersistable(false); + var->SetDataType(framework::proto::VarType::FP32); + + op_desc_.SetOutput(name, {var_name}); + outputs_.push_back(var_name); + } +} + +framework::VarDesc *OpTester::Var(const std::string &name) { + auto it = vars_.find(name); + if (it != vars_.end()) { + return it->second.get(); + } + auto *var = new framework::VarDesc(name); + vars_[name].reset(var); + return var; +} + +template +void OpTester::SetupTensor(framework::LoDTensor *tensor, + const std::vector &shape, T lower, + T upper) { + static unsigned int seed = 100; + std::mt19937 rng(seed++); + std::uniform_real_distribution uniform_dist(0, 1); + + T *ptr = tensor->mutable_data(framework::make_ddim(shape), place_); + if (platform::is_cpu_place(place_)) { + for (int i = 0; i < tensor->numel(); ++i) { + ptr[i] = static_cast(uniform_dist(rng) * (upper - lower) + lower); + } + } else { + framework::LoDTensor cpu_tensor; + T *cpu_ptr = cpu_tensor.mutable_data(framework::make_ddim(shape), + platform::CPUPlace()); + for (int i = 0; i < cpu_tensor.numel(); ++i) { + cpu_ptr[i] = static_cast(uniform_dist(rng) * (upper - lower) + lower); + } + TensorCopySync(cpu_tensor, place_, tensor); + } +} + +void OpTester::CreateVariables(framework::Scope *scope) { + for (auto &item : vars_) { + auto &var = item.second; + if (var->Name() == framework::kEmptyVarName) { + continue; + } + + auto *ptr = scope->Var(var->Name()); + framework::InitializeVariable(ptr, var->GetType()); + if (var->Persistable()) { + VLOG(3) << "Create Variable " << var->Name() + << " global, which pointer is " << ptr; + } else { + VLOG(3) << "Create Variable " << var->Name() + << " locally, which pointer is " << ptr; + } + } + + // Allocate memory for input tensor + for (auto &name : inputs_) { + VLOG(3) << "Allocate memory for tensor " << name; + auto &var_desc = vars_[name]; + std::vector shape = var_desc->GetShape(); + + auto *var = scope->Var(name); + auto *tensor = var->GetMutable(); + SetupTensor(tensor, shape, static_cast(0.0), + static_cast(1.0)); + } +} + +static std::string GenSpaces(int count) { + std::stringstream ss; + for (int i = 0; i < count; ++i) { + ss << " "; + } + return ss.str(); +} + +std::string OpTester::DebugString() { + std::stringstream ss; + int count = 0; + for (auto &item : vars_) { + auto &var = item.second; + ss << GenSpaces(count++) << "vars {\n"; + ss << GenSpaces(count) << "name: \"" << var->Name() << "\"\n"; + ss << GenSpaces(count++) << "type: {\n"; + ss << GenSpaces(count) << "type: LOD_TENSOR\n"; + ss << GenSpaces(count++) << "lod_tensor {\n"; + ss << GenSpaces(count++) << "tensor {\n"; + ss << GenSpaces(count) << "data_type: FP32\n"; + std::vector shape = var->GetShape(); + for (auto d : shape) { + ss << GenSpaces(count) << "dims: " << d << "\n"; + } + ss << GenSpaces(--count) << "}\n"; + ss << GenSpaces(--count) << "}\n"; + ss << GenSpaces(--count) << "}\n"; + ss << 
GenSpaces(count) << "persistable: " << var->Persistable() << "\n"; + ss << GenSpaces(--count) << "}\n"; + } + ss << GenSpaces(count++) << "ops {\n"; + for (auto &name : op_desc_.InputNames()) { + ss << GenSpaces(count++) << "inputs {\n"; + ss << GenSpaces(count) << "parameters: \"" << name << "\"\n"; + ss << GenSpaces(count) << "arguments: \"" << op_desc_.Input(name)[0] + << "\"\n"; + ss << GenSpaces(--count) << "}\n"; + } + for (auto &name : op_desc_.OutputNames()) { + ss << GenSpaces(count++) << "outputs {\n"; + ss << GenSpaces(count) << "parameters: \"" << name << "\"\n"; + ss << GenSpaces(count) << "arguments: \"" << op_desc_.Output(name)[0] + << "\"\n"; + ss << GenSpaces(--count) << "}\n"; + } + ss << GenSpaces(count) << "type: " << op_desc_.Type() << "\n"; + ss << GenSpaces(--count) << "}\n"; + return ss.str(); +} + +TEST(op_tester, base) { + OpTester tester; + if (!FLAGS_op_config_list.empty()) { + tester.Init(FLAGS_op_config_list); + } else { + OpTesterConfig config; + config.op_type = "elementwise_add"; + config.inputs.resize(2); + config.inputs[0].name = "X"; + config.inputs[0].dims = {64, 64}; + config.inputs[1].name = "Y"; + config.inputs[1].dims = {64, 1}; + tester.Init(config); + } + tester.Run(); +} + +} // namespace benchmark +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/benchmark/op_tester.h b/paddle/fluid/operators/benchmark/op_tester.h new file mode 100644 index 0000000000..1723d46c47 --- /dev/null +++ b/paddle/fluid/operators/benchmark/op_tester.h @@ -0,0 +1,69 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/benchmark/op_tester_config.h" + +namespace paddle { +namespace operators { +namespace benchmark { + +class OpTester { + public: + OpTester() {} + + void Init(const std::string &filename); + void Init(const OpTesterConfig &config); + + void Run(); + + std::string DebugString(); + + private: + std::vector GetOpProtoInputNames(); + std::vector GetOpProtoOutputNames(); + + void CreateInputVarDesc(); + void CreateOutputVarDesc(); + + framework::VarDesc *Var(const std::string &name); + void CreateVariables(framework::Scope *scope); + + template + void SetupTensor(framework::LoDTensor *input, + const std::vector &shape, T lower, T upper); + + void RunImpl(); + + private: + OpTesterConfig config_; + std::string type_; + framework::OpDesc op_desc_; + std::unordered_map> vars_; + std::vector inputs_; + std::vector outputs_; + std::unique_ptr op_; + platform::Place place_; + std::unique_ptr scope_; +}; + +} // namespace benchmark +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/benchmark/op_tester_config.cc b/paddle/fluid/operators/benchmark/op_tester_config.cc new file mode 100644 index 0000000000..3db8de7f76 --- /dev/null +++ b/paddle/fluid/operators/benchmark/op_tester_config.cc @@ -0,0 +1,114 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/benchmark/op_tester_config.h" +#include +#include "glog/logging.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace operators { +namespace benchmark { + +static const char kStartSeparator[] = "{"; +static const char kEndSeparator[] = "}"; +static const char kSepBetweenItems[] = ";"; + +static bool StartWith(const std::string& str, const std::string& substr) { + return str.find(substr) == 0; +} + +static bool EndWith(const std::string& str, const std::string& substr) { + return str.rfind(substr) == (str.length() - substr.length()); +} + +static void EraseEndSep(std::string* str) { + std::string substr = kSepBetweenItems; + if (EndWith(*str, substr)) { + str->erase(str->length() - substr.length(), str->length()); + } +} + +static std::vector ParseDims(std::string dims_str) { + std::vector dims; + std::string token; + std::istringstream token_stream(dims_str); + while (std::getline(token_stream, token, 'x')) { + dims.push_back(std::stoi(token)); + } + return dims; +} + +OpInputConfig::OpInputConfig(std::istream& is) { + std::string sep; + is >> sep; + if (sep == kStartSeparator) { + while (sep != kEndSeparator) { + is >> sep; + if (sep == "name" || sep == "name:") { + is >> name; + EraseEndSep(&name); + } else if (sep == "dims" || sep == "dims:") { + std::string dims_str; + is >> dims_str; + dims = ParseDims(dims_str); + } + } + } +} + +OpTesterConfig::OpTesterConfig(const std::string& filename) { + std::ifstream fin(filename, std::ios::in | std::ios::binary); + PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s", + filename.c_str()); + + Init(fin); +} + +void OpTesterConfig::Init(std::istream& is) { + std::string sep; + is >> sep; + if (sep == kStartSeparator) { + while (sep != kEndSeparator) { + is >> sep; + if (sep == "op_type" || sep == "op_type:") { + is >> op_type; + } else if (sep == "device_id" || sep == "device_id:") { + is >> device_id; + } else if (sep == "repeat" || sep == "repeat:") { + is >> repeat; + } else if (sep == "profile" || sep == "profile:") { + is >> profile; + } else if (sep == "print_debug_string" || sep == "print_debug_string:") { + is >> print_debug_string; + } else if (sep == "input" || sep == "input:") { + OpInputConfig input_config(is); + inputs.push_back(input_config); + } + } + } +} + +const OpInputConfig* OpTesterConfig::GetInput(const std::string& name) { + for (size_t i = 0; i < inputs.size(); ++i) { + if (inputs[i].name == name) { + return &inputs[i]; + } + } + return nullptr; +} + +} // namespace benchmark +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/benchmark/op_tester_config.h b/paddle/fluid/operators/benchmark/op_tester_config.h new file mode 100644 index 0000000000..f7b62cb8ad --- /dev/null +++ b/paddle/fluid/operators/benchmark/op_tester_config.h @@ -0,0 +1,51 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include + +namespace paddle { +namespace operators { +namespace benchmark { + +struct OpInputConfig { + OpInputConfig() {} + explicit OpInputConfig(std::istream& is); + + std::string name; + std::vector dims; +}; + +struct OpTesterConfig { + OpTesterConfig() {} + explicit OpTesterConfig(const std::string& filename); + void Init(std::istream& is); + + const OpInputConfig* GetInput(const std::string& name); + + std::string op_type; + std::vector inputs; + int device_id{-1}; // CPU: -1 + int repeat{1}; + int profile{0}; + int print_debug_string{0}; + double runtime{0.0}; +}; + +} // namespace benchmark +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 2632bfb6de..356eba6f86 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -1,16 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ #include #include @@ -259,7 +259,7 @@ struct TestFuncWithRefer, std::vector, std::vector, const std::vector& x, const std::vector& yref, const typename jit::SeqPoolTuples::attr_type& attr) { EXPECT_TRUE(tgt != nullptr); - EXPECT_EQ(x.size() % yref.size(), 0); + EXPECT_EQ(x.size() % yref.size(), static_cast(0)); int w = yref.size(); std::vector y(w); const T* x_data = x.data(); From 3b08c9abf428ad77323cb49b95a4f6333abb8be5 Mon Sep 17 00:00:00 2001 From: chengduo Date: Fri, 22 Feb 2019 00:05:38 -0600 Subject: [PATCH 116/117] enhance profiler (#15842) test=develop --- paddle/fluid/platform/device_tracer.cc | 2 + paddle/fluid/platform/profiler.cc | 57 +++++++++++++++++++++----- paddle/fluid/platform/profiler.h | 11 ++++- 3 files changed, 59 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index f42212d095..52372c2514 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -601,6 +601,8 @@ void initCuptiCbidStr() { REGISTER_RUNTIME_CBID_STR(cudaStreamSynchronize_v3020); REGISTER_RUNTIME_CBID_STR(cudaStreamWaitEvent_v3020); REGISTER_RUNTIME_CBID_STR(cudaUnbindTexture_v3020); + REGISTER_RUNTIME_CBID_STR(cudaSetupArgument_v3020); + REGISTER_RUNTIME_CBID_STR(cudaLaunch_v3020); #if CUDA_VERSION >= 9000 REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernel_v9000); REGISTER_RUNTIME_CBID_STR(cudaLaunchCooperativeKernelMultiDevice_v9000); diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 28f93b4b12..9a285a6b53 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -254,9 +254,11 @@ struct EventItem { std::string name; int calls; double total_time; - double min_time; double max_time; double ave_time; + double min_time; + double cpu_time; + double gpu_time; float ratio; }; @@ -290,8 +292,12 @@ void PrintProfiler(const std::vector>& events_table, // Output events table std::cout.setf(std::ios::left); std::cout << std::setw(name_width) << "Event" << std::setw(data_width) - << "Calls" << std::setw(data_width) << "Total" - << std::setw(data_width) << "Min." << std::setw(data_width) + << "Calls" << std::setw(data_width) << "Total"; + if (g_state == ProfilerState::kAll) { + std::cout << std::setw(data_width * 2) << "CPU Time (Ratio)" + << std::setw(data_width * 2) << "GPU Time (Ratio)"; + } + std::cout << std::setw(data_width) << "Min." << std::setw(data_width) << "Max." << std::setw(data_width) << "Ave." << std::setw(data_width) << "Ratio." 
<< std::endl; for (size_t i = 0; i < events_table.size(); ++i) { @@ -299,8 +305,18 @@ void PrintProfiler(const std::vector>& events_table, const EventItem& event_item = events_table[i][j]; std::cout << std::setw(name_width) << event_item.name << std::setw(data_width) << event_item.calls - << std::setw(data_width) << event_item.total_time - << std::setw(data_width) << event_item.min_time + << std::setw(data_width) << event_item.total_time; + if (g_state == ProfilerState::kAll) { + std::cout << std::setw(data_width * 2) + << string::Sprintf( + "%f (%f)", event_item.cpu_time, + (event_item.cpu_time / event_item.total_time)) + << std::setw(data_width * 2) + << string::Sprintf( + "%f (%f)", event_item.gpu_time, + (event_item.gpu_time / event_item.total_time)); + } + std::cout << std::setw(data_width) << event_item.min_time << std::setw(data_width) << event_item.max_time << std::setw(data_width) << event_item.ave_time << std::setw(data_width) << event_item.ratio << std::endl; @@ -349,6 +365,18 @@ void ParseEvents(const std::vector>& events, return a.ave_time > b.ave_time; }; break; + case EventSortingKey::kGPUTime: + sorted_domain = "average time"; + sorted_func = [](const EventItem& a, const EventItem& b) { + return a.gpu_time > b.gpu_time; + }; + break; + case EventSortingKey::kCPUTime: + sorted_domain = "average time"; + sorted_func = [](const EventItem& a, const EventItem& b) { + return a.cpu_time > b.cpu_time; + }; + break; default: sorted_domain = "event first end time"; } @@ -387,10 +415,17 @@ void ParseEvents(const std::vector>& events, } if (rit != pushed_events.rend()) { - double event_time = (g_state == ProfilerState::kCUDA || - g_state == ProfilerState::kAll) - ? rit->CudaElapsedMs((*analyze_events)[i][j]) - : rit->CpuElapsedMs((*analyze_events)[i][j]); + double event_time = 0; + double gpu_time = rit->CudaElapsedMs((*analyze_events)[i][j]); + double cpu_time = rit->CpuElapsedMs((*analyze_events)[i][j]); + if (g_state == ProfilerState::kCUDA) { + event_time = gpu_time; + } else if (g_state == ProfilerState::kCPU) { + event_time = cpu_time; + } else { + event_time = gpu_time + cpu_time; + } + total += event_time; std::string event_name; @@ -407,7 +442,7 @@ void ParseEvents(const std::vector>& events, event_idx[event_name] = event_items.size(); EventItem event_item = {event_name, 1, event_time, event_time, event_time, event_time, - 0.}; + gpu_time, cpu_time, 0.}; event_items.push_back(event_item); } else { int index = event_idx[event_name]; @@ -420,6 +455,8 @@ void ParseEvents(const std::vector>& events, // max time event_items[index].max_time = std::max(event_time, event_items[index].max_time); + event_items[index].gpu_time += gpu_time; + event_items[index].cpu_time += cpu_time; } // remove the push marker from the list diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index 55d94f0fd8..4057e5ea05 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -117,7 +117,16 @@ struct RecordBlock { std::vector> GetAllEvents(); // Candidate keys to sort the profiling report -enum EventSortingKey { kDefault, kCalls, kTotal, kMin, kMax, kAve }; +enum EventSortingKey { + kDefault, + kCalls, + kTotal, + kMin, + kMax, + kAve, + kCPUTime, + kGPUTime +}; // Enable the profiling function. 
void EnableProfiler(ProfilerState state); From ee2321debd803037da29656c7d6e437fdaac036b Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 22 Feb 2019 16:33:03 +0800 Subject: [PATCH 117/117] Revert 15770 develop a6910f900 gelu mkl opt (#15872) * Revert "Optimze Gelu with MKL Erf function (#15770)" This reverts commit 676995c86cb4b49f9a41c7a32c5e054b16201753. * test=develop --- cmake/external/mklml.cmake | 6 ++---- paddle/fluid/operators/activation_op.h | 22 ---------------------- paddle/fluid/operators/math/blas.h | 8 -------- paddle/fluid/operators/math/blas_impl.h | 23 ----------------------- paddle/fluid/platform/dynload/mklml.h | 2 -- 5 files changed, 2 insertions(+), 59 deletions(-) diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 32a9368a9f..54826cedb8 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -39,10 +39,8 @@ IF(WIN32) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.lib) SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/mklml.dll) SET(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll) -ELSE() - #TODO(intel-huying): - # Now enable Erf function in mklml library temporarily, it will be updated as offical version later. - SET(MKLML_VER "VsErf_mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE) +ELSE() + SET(MKLML_VER "mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE) SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE) SET(MKLML_LIB ${MKLML_LIB_DIR}/libmklml_intel.so) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so) diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index e8f5530b78..c7df3ea58a 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -11,7 +11,6 @@ limitations under the License. */ #pragma once #include -#include #include #include #include @@ -25,7 +24,6 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/float16.h" #ifdef PADDLE_WITH_MKLDNN @@ -303,28 +301,8 @@ template struct GeluFunctor : public BaseActivationFunctor { template void operator()(Device d, X x, Out out) const { -// Because the execute or device context can not be deliver here, it keep the -// marco for NVCC. 
-#if defined(PADDLE_WITH_MKLML) && !defined(_WIN32) && !defined(__APPLE__) && \ - !defined(__OSX__) && !defined(PADDLE_WITH_CUDA) - auto x_data = x.data(); - auto out_data = out.data(); - int n = std::min(x.size(), out.size()); - - std::memset(out_data, 0, n * sizeof(T)); - math::CBlas::AXPY(n, static_cast(M_SQRT1_2), x_data, 1, out_data, 1); - math::CBlas::VMERF(n, out_data, out_data, VML_LA); - for (int i = 0; i < n; i++) { - out_data[i] += static_cast(1); - } - math::CBlas::VMUL(n, x_data, out_data, out_data); - for (int i = 0; i < n; i++) { - out_data[i] *= static_cast(0.5); - } -#else auto temp = (x * static_cast(M_SQRT1_2)).erf(); out.device(d) = x * static_cast(0.5) * (static_cast(1) + temp); -#endif } }; diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h index ce8109f64d..f67f57827b 100644 --- a/paddle/fluid/operators/math/blas.h +++ b/paddle/fluid/operators/math/blas.h @@ -184,9 +184,6 @@ class Blas { template void VINV(int n, const T* a, T* y) const; - template - void VMERF(int n, const T* a, T* y, int64_t mode) const; - private: const DeviceContext& context_; }; @@ -293,11 +290,6 @@ class BlasT : private Blas { Base()->template VINV(args...); } - template - void VMERF(ARGS... args) const { - Base()->template VMERF(args...); - } - private: const Blas* Base() const { return static_cast*>(this); diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h index ba995dabec..972366bc09 100644 --- a/paddle/fluid/operators/math/blas_impl.h +++ b/paddle/fluid/operators/math/blas_impl.h @@ -123,11 +123,6 @@ struct CBlas { static void VINV(ARGS... args) { platform::dynload::vsInv(args...); } - - template - static void VMERF(ARGS... args) { - platform::dynload::vmsErf(args...); - } }; template <> @@ -228,11 +223,6 @@ struct CBlas { static void VINV(ARGS... args) { platform::dynload::vdInv(args...); } - - template - static void VMERF(ARGS... args) { - platform::dynload::vmdErf(args...); - } }; #else @@ -635,19 +625,6 @@ void Blas::VINV(int n, const T *a, T *y) const { #endif } -template <> -template -void Blas::VMERF(int n, const T *a, T *y, - int64_t mode) const { -#ifdef PADDLE_WITH_MKLML - CBlas::VMERF(n, a, y, mode); -#else - for (int i = 0; i < n; ++i) { - y[i] = std::erf(a[i]); - } -#endif -} - } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h index a5b846f500..a260cda491 100644 --- a/paddle/fluid/platform/dynload/mklml.h +++ b/paddle/fluid/platform/dynload/mklml.h @@ -86,8 +86,6 @@ extern void* mklml_dso_handle; __macro(vdPowx); \ __macro(vsInv); \ __macro(vdInv); \ - __macro(vmsErf); \ - __macro(vmdErf); \ __macro(MKL_Set_Num_Threads) MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP);
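Usage note for the operator benchmark tester (PATCH 115/117): the op_tester binary reads its settings from the file passed through the --op_config_list flag. The sketch below is only an illustration inferred from OpTesterConfig::Init and OpInputConfig; the file name, dims and repeat count are placeholders, and the accepted keys are exactly those handled by that parser (op_type, device_id, repeat, profile, print_debug_string, and input blocks with name and dims).

    {
      op_type: elementwise_add
      device_id: -1
      repeat: 100
      profile: 0
      print_debug_string: 0
      input {
        name: X
        dims: 64x64
      }
      input {
        name: Y
        dims: 64x1
      }
    }

Assuming the cc_test target keeps the name op_tester, a run from the build tree might look like ./paddle/fluid/operators/benchmark/op_tester --op_config_list=/path/to/op_config.txt; when the flag is left empty, the tester falls back to the built-in elementwise_add configuration shown in TEST(op_tester, base).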