From 29ceb9312611be6fc83b3c673ae76737a090cc05 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 14 Jan 2019 15:31:15 +0800 Subject: [PATCH 01/46] Use malloc and free in JeMalloc test=develop --- .../memory/allocation/legacy_allocator.cc | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index 64aa63ffe9..cf6d351a41 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -13,8 +13,14 @@ // limitations under the License. #include "paddle/fluid/memory/allocation/legacy_allocator.h" + #include #include + +#ifdef WITH_JEMALLOC +#include +#endif + #include "glog/logging.h" #include "paddle/fluid/memory/detail/buddy_allocator.h" #include "paddle/fluid/memory/detail/system_allocator.h" @@ -89,7 +95,11 @@ struct NaiveAllocator { template <> void *Alloc(const platform::CPUPlace &place, size_t size) { VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); +#ifdef WITH_JEMALLOC + void *p = malloc(size); +#else void *p = GetCPUBuddyAllocator()->Alloc(size); +#endif if (FLAGS_init_allocated_mem) { memset(p, 0xEF, size); } @@ -100,12 +110,21 @@ void *Alloc(const platform::CPUPlace &place, size_t size) { template <> void Free(const platform::CPUPlace &place, void *p) { VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); +#ifdef WITH_JEMALLOC + free(p); +#else GetCPUBuddyAllocator()->Free(p); +#endif } template <> size_t Used(const platform::CPUPlace &place) { +#ifdef WITH_JEMALLOC + // fake the result of used memory when WITH_JEMALLOC is ON + return 0U; +#else return GetCPUBuddyAllocator()->Used(); +#endif } #ifdef PADDLE_WITH_CUDA From 200776bdf09ecfc3c5870ece64031bf9aa93417e Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Wed, 16 Jan 2019 08:10:49 +0000 Subject: [PATCH 02/46] add simple rnn --- python/paddle/fluid/imperative/nn.py | 32 +++++++++++++++++++ .../fluid/tests/unittests/test_imperative.py | 16 ++++++++++ 2 files changed, 48 insertions(+) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 8754e5d4d0..ef1d28e59e 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -248,3 +248,35 @@ class FC(layers.Layer): outputs={"Out": out}, attrs={"use_mkldnn": False}) return out + + +class SimpleRNNCell(layers.Layer): + def __init__(self, step_input_size, hidden_size, output_size, param_attr): + self.input_size = step_input_size + self.hidden_size = hidden_size + self.output_size = output_size + from ..layer_helper import LayerHelper + self._helper = LayerHelper('SimpleRNNCell', param_attr=param_attr) + + def _build_once(self, inputs): + i2h_param_shape = [self.step_input_size, self.hidden_size] + h2h_param_shape = [self.hidden_size, self.hidden_size] + h2o_param_shape = [self.output_size, self.hidden_size] + self._i2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=i2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2o_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2o_param_shape, + dtype=self._dtype, + is_bias=False) + + def forward(self, inputs): + return 1 diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 86baff3c58..915b2921d7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -80,6 +80,19 @@ class MLP(fluid.imperative.Layer): return x +class SimpleRNN(fluid.imperative.Layer): + def __init__(self, inputs): + super(SimpleRNN, self).__init__() + self.seq_len = input.shape[0] + self._fc1 = FC(3, + fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1))) + + def forward(self, inputs): + for i in range(self.seq_len): + x = self._fc1(inputs[i]) + + class TestImperative(unittest.TestCase): def test_layer(self): with fluid.imperative.guard(): @@ -210,6 +223,9 @@ class TestImperative(unittest.TestCase): self.assertTrue(np.allclose(dy_out, static_out)) self.assertTrue(np.allclose(dy_grad, static_grad)) + def test_rnn_ptb(self): + np_inp = np.arrary([]) + if __name__ == '__main__': unittest.main() From a360f1436b81c2fef3900cda6f053a5ad1a16ba4 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Thu, 17 Jan 2019 02:31:17 +0000 Subject: [PATCH 03/46] little change --- python/paddle/fluid/imperative/nn.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index ef1d28e59e..24f1865f3d 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -251,10 +251,16 @@ class FC(layers.Layer): class SimpleRNNCell(layers.Layer): - def __init__(self, step_input_size, hidden_size, output_size, param_attr): + def __init__(self, + step_input_size, + hidden_size, + output_size, + param_attr, + dtype=core.VarDesc.VarType.FP32): self.input_size = step_input_size self.hidden_size = hidden_size self.output_size = output_size + self._dype = core.VarDesc.VarType.FP32 from ..layer_helper import LayerHelper self._helper = LayerHelper('SimpleRNNCell', param_attr=param_attr) @@ -279,4 +285,19 @@ class SimpleRNNCell(layers.Layer): is_bias=False) def forward(self, inputs): + input = inputs[0] + pre_hidden = inputs[1] + out = self._helper.create_variable_for_type_inference(self._dtype) + hidden = self._helper.create_variable_for_type_inference(self._dype) + + self._helper.append_op( + type="mul", + inputs={"X": input, + "Y": self._w}, + outputs={"Out": out}, + attrs={ + "x_num_col_dims": self._num_flatten_dims, + "y_num_col_dims": 1 + }) + return 1 From 88ee56d0b2b2730149fcd1170ffebfa9176f585e Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Fri, 18 Jan 2019 07:53:33 +0000 Subject: [PATCH 04/46] enhance nms for mask rcnn --- paddle/fluid/operators/detection/bbox_util.h | 20 ++ .../operators/detection/multiclass_nms_op.cc | 290 ++++++++++++------ .../tests/unittests/test_multiclass_nms_op.py | 173 +++++++++-- 3 files changed, 371 insertions(+), 112 deletions(-) diff --git a/paddle/fluid/operators/detection/bbox_util.h b/paddle/fluid/operators/detection/bbox_util.h index 6abeca1da4..0270ca77f3 100644 --- a/paddle/fluid/operators/detection/bbox_util.h +++ b/paddle/fluid/operators/detection/bbox_util.h @@ -93,5 +93,25 @@ void BboxOverlaps(const framework::Tensor& r_boxes, } } +template +void SliceOneClass(const platform::DeviceContext& ctx, + const framework::Tensor& items, const int class_id, + framework::Tensor* one_class_item) { + T* item_data = one_class_item->mutable_data(ctx.GetPlace()); + const T* items_data = items.data(); + const int64_t num_item = items.dims()[0]; + const int class_num = items.dims()[1]; + int item_size = 1; + if (items.dims().size() == 3) { + item_size = items.dims()[2]; + } + for (int i = 0; i < num_item; ++i) { + for (int j = 0; j < item_size; ++j) { + item_data[i * item_size + j] = + items_data[i * class_num * item_size + class_id * item_size + j]; + } + } +} + } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 2395b18148..680754dded 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -1,18 +1,16 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - limitations under the License. */ +#include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/poly_util.h" namespace paddle { @@ -35,30 +33,45 @@ class MultiClassNMSOp : public framework::OperatorWithKernel { auto box_dims = ctx->GetInputDim("BBoxes"); auto score_dims = ctx->GetInputDim("Scores"); + auto score_size = score_dims.size(); if (ctx->IsRuntime()) { + PADDLE_ENFORCE(score_size == 2 || score_size == 3, + "The rank of Input(Scores) must be 2 or 3"); PADDLE_ENFORCE_EQ(box_dims.size(), 3, - "The rank of Input(BBoxes) must be 3."); - PADDLE_ENFORCE_EQ(score_dims.size(), 3, - "The rank of Input(Scores) must be 3."); - PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || - box_dims[2] == 16 || box_dims[2] == 24 || - box_dims[2] == 32, - "The 2nd dimension of Input(BBoxes) must be 4 or 8, " - "represents the layout of coordinate " - "[xmin, ymin, xmax, ymax] or " - "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " - "8 points: [xi, yi] i= 1,2,...,8 or " - "12 points: [xi, yi] i= 1,2,...,12 or " - "16 points: [xi, yi] i= 1,2,...,16"); - PADDLE_ENFORCE_EQ(box_dims[1], score_dims[2], - "The 1st dimensiong of Input(BBoxes) must be equal to " - "3rd dimension of Input(Scores), which represents the " - "predicted bboxes."); + "The rank of Input(BBoxes) must be 3"); + if (score_size == 3) { + PADDLE_ENFORCE(box_dims[2] == 4 || box_dims[2] == 8 || + box_dims[2] == 16 || box_dims[2] == 24 || + box_dims[2] == 32, + "The last dimension of Input(BBoxes) must be 4 or 8, " + "represents the layout of coordinate " + "[xmin, ymin, xmax, ymax] or " + "4 points: [x1, y1, x2, y2, x3, y3, x4, y4] or " + "8 points: [xi, yi] i= 1,2,...,8 or " + "12 points: [xi, yi] i= 1,2,...,12 or " + "16 points: [xi, yi] i= 1,2,...,16"); + PADDLE_ENFORCE_EQ( + box_dims[1], score_dims[2], + "The 2nd dimension of Input(BBoxes) must be equal to " + "last dimension of Input(Scores), which represents the " + "predicted bboxes."); + } else { + PADDLE_ENFORCE(box_dims[2] == 4, + "The last dimension of Input(BBoxes) must be 4"); + PADDLE_ENFORCE_EQ(box_dims[1], score_dims[1], + "The 2nd dimension of Input(BBoxes)" + "must be equal to the 2nd dimension" + " of Input(Scores)"); + } } // Here the box_dims[0] is not the real dimension of output. // It will be rewritten in the computing kernel. - ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2}); + if (score_size == 3) { + ctx->SetOutputDim("Out", {box_dims[1], box_dims[2] + 2}); + } else { + ctx->SetOutputDim("Out", {-1, box_dims[2] + 2}); + } } protected: @@ -123,8 +136,12 @@ static inline T JaccardOverlap(const T* box1, const T* box2, const T inter_ymin = std::max(box1[1], box2[1]); const T inter_xmax = std::min(box1[2], box2[2]); const T inter_ymax = std::min(box1[3], box2[3]); - const T inter_w = inter_xmax - inter_xmin; - const T inter_h = inter_ymax - inter_ymin; + T inter_w = inter_xmax - inter_xmin; + T inter_h = inter_ymax - inter_ymin; + if (!normalized) { + inter_w += 1; + inter_h += 1; + } const T inter_area = inter_w * inter_h; const T bbox1_area = BBoxArea(box1, normalized); const T bbox2_area = BBoxArea(box2, normalized); @@ -139,7 +156,7 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size, T bbox2_area = PolyArea(box2, box_size, normalized); T inter_area = PolyOverlapArea(box1, box2, box_size, normalized); if (bbox1_area == 0 || bbox2_area == 0 || inter_area == 0) { - // If coordinate values are is invalid + // If coordinate values are invalid // if area size <= 0, return 0. return T(0.); } else { @@ -152,7 +169,8 @@ class MultiClassNMSKernel : public framework::OpKernel { public: void NMSFast(const Tensor& bbox, const Tensor& scores, const T score_threshold, const T nms_threshold, const T eta, - const int64_t top_k, std::vector* selected_indices) const { + const int64_t top_k, std::vector* selected_indices, + const bool normalized) const { // The total boxes for each instance. int64_t num_boxes = bbox.dims()[0]; // 4: [xmin ymin xmax ymax] @@ -178,15 +196,16 @@ class MultiClassNMSKernel : public framework::OpKernel { T overlap = T(0.); // 4: [xmin ymin xmax ymax] if (box_size == 4) { - overlap = JaccardOverlap(bbox_data + idx * box_size, - bbox_data + kept_idx * box_size, true); + overlap = + JaccardOverlap(bbox_data + idx * box_size, + bbox_data + kept_idx * box_size, normalized); } // 8: [x1 y1 x2 y2 x3 y3 x4 y4] or 16, 24, 32 if (box_size == 8 || box_size == 16 || box_size == 24 || box_size == 32) { - overlap = - PolyIoU(bbox_data + idx * box_size, - bbox_data + kept_idx * box_size, box_size, true); + overlap = PolyIoU(bbox_data + idx * box_size, + bbox_data + kept_idx * box_size, box_size, + normalized); } keep = overlap <= adaptive_threshold; } else { @@ -205,37 +224,66 @@ class MultiClassNMSKernel : public framework::OpKernel { void MultiClassNMS(const framework::ExecutionContext& ctx, const Tensor& scores, const Tensor& bboxes, + const int scores_size, std::map>* indices, int* num_nmsed_out) const { int64_t background_label = ctx.Attr("background_label"); int64_t nms_top_k = ctx.Attr("nms_top_k"); int64_t keep_top_k = ctx.Attr("keep_top_k"); + bool normalized = ctx.Attr("normalized"); T nms_threshold = static_cast(ctx.Attr("nms_threshold")); T nms_eta = static_cast(ctx.Attr("nms_eta")); T score_threshold = static_cast(ctx.Attr("score_threshold")); + auto& dev_ctx = ctx.template device_context(); - int64_t class_num = scores.dims()[0]; - int64_t predict_dim = scores.dims()[1]; int num_det = 0; - for (int64_t c = 0; c < class_num; ++c) { - if (c == background_label) continue; - Tensor score = scores.Slice(c, c + 1); - NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, nms_top_k, - &((*indices)[c])); - num_det += (*indices)[c].size(); + int64_t box_num = 0, class_num = 0, predict_dim = 0; + if (scores_size == 3) { + class_num = scores.dims()[0]; + predict_dim = scores.dims()[1]; + for (int64_t c = 0; c < class_num; ++c) { + if (c == background_label) continue; + Tensor score = scores.Slice(c, c + 1); + NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, + nms_top_k, &((*indices)[c]), normalized); + num_det += (*indices)[c].size(); + } + } else { + box_num = scores.dims()[0]; + class_num = scores.dims()[1]; + Tensor score; + score.Resize({box_num, 1}); + Tensor bbox; + bbox.Resize({box_num, 4}); + for (int64_t c = 0; c < class_num; ++c) { + if (c == background_label) continue; + SliceOneClass(dev_ctx, scores, c, &score); + SliceOneClass(dev_ctx, bboxes, c, &bbox); + NMSFast(bbox, score, score_threshold, nms_threshold, nms_eta, nms_top_k, + &((*indices)[c]), normalized); + std::stable_sort((*indices)[c].begin(), (*indices)[c].end()); + num_det += (*indices)[c].size(); + } } *num_nmsed_out = num_det; const T* scores_data = scores.data(); if (keep_top_k > -1 && num_det > keep_top_k) { + const T* sdata; std::vector>> score_index_pairs; for (const auto& it : *indices) { int label = it.first; - const T* sdata = scores_data + label * predict_dim; + if (scores_size == 3) { + sdata = scores_data + label * predict_dim; + } else { + Tensor score; + score.Resize({box_num, 1}); + SliceOneClass(dev_ctx, scores, label, &score); + sdata = score.data(); + } const std::vector& label_indices = it.second; for (size_t j = 0; j < label_indices.size(); ++j) { int idx = label_indices[j]; - PADDLE_ENFORCE_LT(idx, predict_dim); score_index_pairs.push_back( std::make_pair(sdata[idx], std::make_pair(label, idx))); } @@ -252,31 +300,55 @@ class MultiClassNMSKernel : public framework::OpKernel { int idx = score_index_pairs[j].second.second; new_indices[label].push_back(idx); } + if (scores_size == 2) { + for (const auto& it : new_indices) { + int label = it.first; + std::stable_sort(new_indices[label].begin(), + new_indices[label].end()); + } + } new_indices.swap(*indices); *num_nmsed_out = keep_top_k; } } - void MultiClassOutput(const Tensor& scores, const Tensor& bboxes, + void MultiClassOutput(const platform::DeviceContext& ctx, + const Tensor& scores, const Tensor& bboxes, const std::map>& selected_indices, - Tensor* outs) const { + const int scores_size, Tensor* outs) const { + int64_t class_num = scores.dims()[1]; int64_t predict_dim = scores.dims()[1]; int64_t box_size = bboxes.dims()[1]; - int64_t out_dim = bboxes.dims()[1] + 2; + if (scores_size == 2) { + box_size = bboxes.dims()[2]; + } + int64_t out_dim = box_size + 2; auto* scores_data = scores.data(); auto* bboxes_data = bboxes.data(); auto* odata = outs->data(); - + const T* sdata; + Tensor bbox; + bbox.Resize({scores.dims()[0], box_size}); int count = 0; for (const auto& it : selected_indices) { int label = it.first; - const T* sdata = scores_data + label * predict_dim; const std::vector& indices = it.second; + if (scores_size == 2) { + SliceOneClass(ctx, bboxes, label, &bbox); + } else { + sdata = scores_data + label * predict_dim; + } for (size_t j = 0; j < indices.size(); ++j) { int idx = indices[j]; - const T* bdata = bboxes_data + idx * box_size; - odata[count * out_dim] = label; // label - odata[count * out_dim + 1] = sdata[idx]; // score + odata[count * out_dim] = label; // label + const T* bdata; + if (scores_size == 3) { + bdata = bboxes_data + idx * box_size; + odata[count * out_dim + 1] = sdata[idx]; // score + } else { + bdata = bbox.data() + idx * box_size; + odata[count * out_dim + 1] = *(scores_data + idx * class_num + label); + } // xmin, ymin, xmax, ymax or multi-points coordinates std::memcpy(odata + count * out_dim + 2, bdata, box_size * sizeof(T)); count++; @@ -285,40 +357,23 @@ class MultiClassNMSKernel : public framework::OpKernel { } void Compute(const framework::ExecutionContext& ctx) const override { - auto* boxes = ctx.Input("BBoxes"); - auto* scores = ctx.Input("Scores"); + auto* boxes = ctx.Input("BBoxes"); + auto* scores = ctx.Input("Scores"); auto* outs = ctx.Output("Out"); auto score_dims = scores->dims(); - - int64_t batch_size = score_dims[0]; int64_t class_num = score_dims[1]; - int64_t predict_dim = score_dims[2]; - int64_t box_dim = boxes->dims()[2]; - int64_t out_dim = boxes->dims()[2] + 2; + auto& dev_ctx = ctx.template device_context(); std::vector>> all_indices; std::vector batch_starts = {0}; - for (int64_t i = 0; i < batch_size; ++i) { - Tensor ins_score = scores->Slice(i, i + 1); - ins_score.Resize({class_num, predict_dim}); - - Tensor ins_boxes = boxes->Slice(i, i + 1); - ins_boxes.Resize({predict_dim, box_dim}); - - std::map> indices; - int num_nmsed_out = 0; - MultiClassNMS(ctx, ins_score, ins_boxes, &indices, &num_nmsed_out); - all_indices.push_back(indices); - batch_starts.push_back(batch_starts.back() + num_nmsed_out); - } - - int num_kept = batch_starts.back(); - if (num_kept == 0) { - T* od = outs->mutable_data({1}, ctx.GetPlace()); - od[0] = -1; - } else { - outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); + int64_t batch_size = score_dims[0]; + int64_t predict_dim = 0; + int64_t box_dim = boxes->dims()[2]; + int64_t out_dim = box_dim + 2; + int num_nmsed_out = 0; + if (score_dims.size() == 3) { + predict_dim = score_dims[2]; for (int64_t i = 0; i < batch_size; ++i) { Tensor ins_score = scores->Slice(i, i + 1); ins_score.Resize({class_num, predict_dim}); @@ -326,17 +381,69 @@ class MultiClassNMSKernel : public framework::OpKernel { Tensor ins_boxes = boxes->Slice(i, i + 1); ins_boxes.Resize({predict_dim, box_dim}); - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; - if (e > s) { - Tensor out = outs->Slice(s, e); - MultiClassOutput(ins_score, ins_boxes, all_indices[i], &out); + std::map> indices; + MultiClassNMS(ctx, ins_score, ins_boxes, score_dims.size(), &indices, + &num_nmsed_out); + all_indices.push_back(indices); + batch_starts.push_back(batch_starts.back() + num_nmsed_out); + } + } else { + auto boxes_lod = boxes->lod().back(); + int64_t n = static_cast(boxes_lod.size() - 1); + for (int i = 0; i < n; ++i) { + Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + std::map> indices; + MultiClassNMS(ctx, scores_slice, boxes_slice, score_dims.size(), + &indices, &num_nmsed_out); + all_indices.push_back(indices); + batch_starts.push_back(batch_starts.back() + num_nmsed_out); + } + } + + int num_kept = batch_starts.back(); + if (num_kept == 0) { + T* od = outs->mutable_data({1, 1}, ctx.GetPlace()); + od[0] = -1; + batch_starts.back() = 1; + } else { + outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); + if (score_dims.size() == 3) { + for (int64_t i = 0; i < batch_size; ++i) { + Tensor ins_score = scores->Slice(i, i + 1); + ins_score.Resize({class_num, predict_dim}); + + Tensor ins_boxes = boxes->Slice(i, i + 1); + ins_boxes.Resize({predict_dim, box_dim}); + + int64_t s = batch_starts[i]; + int64_t e = batch_starts[i + 1]; + if (e > s) { + Tensor out = outs->Slice(s, e); + MultiClassOutput(dev_ctx, ins_score, ins_boxes, all_indices[i], + score_dims.size(), &out); + } + } + } else { + auto boxes_lod = boxes->lod().back(); + int64_t n = static_cast(boxes_lod.size() - 1); + for (int i = 0; i < n; ++i) { + Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + int64_t s = batch_starts[i]; + int64_t e = batch_starts[i + 1]; + if (e > s) { + Tensor out = outs->Slice(s, e); + MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i], + score_dims.size(), &out); + } } } } framework::LoD lod; lod.emplace_back(batch_starts); + LOG(ERROR) << "c++ lod: " << lod; outs->set_lod(lod); } @@ -346,17 +453,23 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { AddInput("BBoxes", - "(Tensor) A 3-D Tensor with shape " + "Two types of bboxes are supported:" + "1. (Tensor) A 3-D Tensor with shape " "[N, M, 4 or 8 16 24 32] represents the " "predicted locations of M bounding bboxes, N is the batch size. " "Each bounding box has four coordinate values and the layout is " - "[xmin, ymin, xmax, ymax], when box size equals to 4."); + "[xmin, ymin, xmax, ymax], when box size equals to 4." + "2. (LoDTensor) A 3-D Tensor with shape [N, M, 4]"); AddInput("Scores", - "(Tensor) A 3-D Tensor with shape [N, C, M] represents the " + "Two types of scores are supported:" + "1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the " "predicted confidence predictions. N is the batch size, C is the " "class number, M is number of bounding boxes. For each category " "there are total M scores which corresponding M bounding boxes. " - " Please note, M is equal to the 1st dimension of BBoxes. "); + " Please note, M is equal to the 1st dimension of BBoxes. " + "2. (LoDTensor) A 2-D LoDTensor with shape" + "[N, num_class]. N is the number of bbox and" + "M represents the scores of bboxes in each class."); AddAttr( "background_label", "(int, defalut: 0) " @@ -384,6 +497,10 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "(int64_t) " "Number of total bboxes to be kept per image after NMS " "step. -1 means keeping all bboxes after NMS step."); + AddAttr("normalized", + "(bool, default false) " + "Whether detections are normalized.") + .SetDefault(true); AddOutput("Out", "(LoDTensor) A 2-D LoDTensor with shape [No, 6] represents the " "detections. Each row has 6 values: " @@ -399,17 +516,14 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( This operator is to do multi-class non maximum suppression (NMS) on a batched of boxes and scores. - In the NMS step, this operator greedily selects a subset of detection bounding boxes that have high scores larger than score_threshold, if providing this threshold, then selects the largest nms_top_k confidences scores if nms_top_k is larger than -1. Then this operator pruns away boxes that have high IOU (intersection over union) overlap with already selected boxes by adaptive threshold NMS based on parameters of nms_threshold and nms_eta. - Aftern NMS step, at most keep_top_k number of total bboxes are to be kept per image if keep_top_k is larger than -1. - This operator support multi-class and batched inputs. It applying NMS independently for each class. The outputs is a 2-D LoDTenosr, for each image, the offsets in first dimension of LoDTensor are called LoD, the number diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index 9778bd694d..af36bcfaa0 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -19,7 +19,7 @@ import copy from op_test import OpTest -def iou(box_a, box_b): +def iou(box_a, box_b, normalized): """Apply intersection-over-union overlap between box_a and box_b """ xmin_a = min(box_a[0], box_a[2]) @@ -32,8 +32,10 @@ def iou(box_a, box_b): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a) * (xmax_a - xmin_a) - area_b = (ymax_b - ymin_b) * (xmax_b - xmin_b) + area_a = (ymax_a - ymin_a + (normalized == False)) * \ + (xmax_a - xmin_a + (normalized == False)) + area_b = (ymax_b - ymin_b + (normalized == False)) * \ + (xmax_b - xmin_b + (normalized == False)) if area_a <= 0 and area_b <= 0: return 0.0 @@ -42,17 +44,21 @@ def iou(box_a, box_b): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa, 0.0) * max(yb - ya, 0.0) - - box_a_area = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]) - box_b_area = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]) + inter_area = max(xb - xa + (normalized == False), 0.0) * \ + max(yb - ya + (normalized == False), 0.0) iou_ratio = inter_area / (area_a + area_b - inter_area) return iou_ratio -def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): +def nms(boxes, + scores, + score_threshold, + nms_threshold, + top_k=200, + normalized=True, + eta=1.0): """Apply non-maximum suppression at test time to avoid detecting too many overlapping bounding boxes for a given object. Args: @@ -87,7 +93,7 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): for k in range(len(selected_indices)): if keep: kept_idx = selected_indices[k] - overlap = iou(boxes[idx], boxes[kept_idx]) + overlap = iou(boxes[idx], boxes[kept_idx], normalized) keep = True if overlap <= adaptive_threshold else False else: break @@ -99,16 +105,24 @@ def nms(boxes, scores, score_threshold, nms_threshold, top_k=200, eta=1.0): def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, - nms_top_k, keep_top_k): - class_num = scores.shape[0] - priorbox_num = scores.shape[1] + nms_top_k, keep_top_k, normalized, shared): + if shared: + class_num = scores.shape[0] + priorbox_num = scores.shape[1] + else: + box_num = scores.shape[0] + class_num = scores.shape[1] selected_indices = {} num_det = 0 for c in range(class_num): if c == background: continue - indices = nms(boxes, scores[c], score_threshold, nms_threshold, - nms_top_k) + if shared: + indices = nms(boxes, scores[c], score_threshold, nms_threshold, + nms_top_k, normalized) + else: + indices = nms(boxes[:, c, :], scores[:, c], score_threshold, + nms_threshold, nms_top_k, normalized) selected_indices[c] = indices num_det += len(indices) @@ -116,7 +130,10 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, score_index = [] for c, indices in selected_indices.items(): for idx in indices: - score_index.append((scores[c][idx], c, idx)) + if shared: + score_index.append((scores[c][idx], c, idx)) + else: + score_index.append((scores[idx][c], c, idx)) sorted_score_index = sorted( score_index, key=lambda tup: tup[0], reverse=True) @@ -127,24 +144,74 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, selected_indices[c] = [] for s, c, idx in sorted_score_index: selected_indices[c].append(idx) + if not shared: + for labels in selected_indices: + selected_indices[labels].sort() num_det = keep_top_k return selected_indices, num_det -def batched_multiclass_nms(boxes, scores, background, score_threshold, - nms_threshold, nms_top_k, keep_top_k): +def lod_multiclass_nms(boxes, scores, background, score_threshold, + nms_threshold, nms_top_k, keep_top_k, box_lod, + normalized): + det_outs = [] + lod = [] + head = 0 + for n in range(len(box_lod[0])): + box = boxes[head:head + box_lod[0][n]] + score = scores[head:head + box_lod[0][n]] + head = head + box_lod[0][n] + nmsed_outs, nmsed_num = multiclass_nms( + box, + score, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=False) + if nmsed_num == 0: + lod.append(1) + continue + lod.append(nmsed_num) + for c, indices in nmsed_outs.items(): + for idx in indices: + xmin, ymin, xmax, ymax = box[idx, c, :] + det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax]) + + return det_outs, lod + + +def batched_multiclass_nms(boxes, + scores, + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized=True): batch_size = scores.shape[0] det_outs = [] lod = [] for n in range(batch_size): - nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background, - score_threshold, nms_threshold, - nms_top_k, keep_top_k) - lod.append(nmsed_num) - if nmsed_num == 0: continue + nmsed_outs, nmsed_num = multiclass_nms( + boxes[n], + scores[n], + background, + score_threshold, + nms_threshold, + nms_top_k, + keep_top_k, + normalized, + shared=True) + if nmsed_num == 0: + lod.append(1) + continue + lod.append(nmsed_num) tmp_det_out = [] for c, indices in nmsed_outs.items(): for idx in indices: @@ -168,7 +235,6 @@ class TestMulticlassNMSOp(OpTest): M = 1200 C = 21 BOX_SIZE = 4 - background = 0 nms_threshold = 0.3 nms_top_k = 400 @@ -193,6 +259,7 @@ class TestMulticlassNMSOp(OpTest): nmsed_outs, lod = batched_multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, nms_top_k, keep_top_k) + print('python lod: ', lod) nmsed_outs = [-1] if not nmsed_outs else nmsed_outs nmsed_outs = np.array(nmsed_outs).astype('float32') @@ -206,6 +273,7 @@ class TestMulticlassNMSOp(OpTest): 'keep_top_k': keep_top_k, 'score_threshold': score_threshold, 'nms_eta': 1.0, + 'normalized': True, } def test_check_output(self): @@ -219,13 +287,70 @@ class TestMulticlassNMSOpNoOutput(TestMulticlassNMSOp): self.score_threshold = 2.0 +class TestMulticlassNMSLoDInput(OpTest): + def set_argument(self): + self.score_threshold = 0.01 + + def setUp(self): + self.set_argument() + M = 1200 + C = 21 + BOX_SIZE = 4 + box_lod = [[1200]] + background = 0 + nms_threshold = 0.3 + nms_top_k = 400 + keep_top_k = 200 + score_threshold = self.score_threshold + normalized = False + + scores = np.random.random((M, C)).astype('float32') + + def softmax(x): + shiftx = x - np.max(x).clip(-64.) + exps = np.exp(shiftx) + return exps / np.sum(exps) + + scores = np.apply_along_axis(softmax, 1, scores) + + boxes = np.random.random((M, C, BOX_SIZE)).astype('float32') + boxes[:, :, 0] = boxes[:, :, 0] * 10 + boxes[:, :, 1] = boxes[:, :, 1] * 10 + boxes[:, :, 2] = boxes[:, :, 2] * 10 + 10 + boxes[:, :, 3] = boxes[:, :, 3] * 10 + 10 + + nmsed_outs, lod = lod_multiclass_nms( + boxes, scores, background, score_threshold, nms_threshold, + nms_top_k, keep_top_k, box_lod, normalized) + nmsed_outs = [-1] if not nmsed_outs else nmsed_outs + nmsed_outs = np.array(nmsed_outs).astype('float32') + self.op_type = 'multiclass_nms' + self.inputs = { + 'BBoxes': (boxes, box_lod), + 'Scores': (scores, box_lod), + } + self.outputs = {'Out': (nmsed_outs, [lod])} + self.attrs = { + 'background_label': 0, + 'nms_threshold': nms_threshold, + 'nms_top_k': nms_top_k, + 'keep_top_k': keep_top_k, + 'score_threshold': score_threshold, + 'nms_eta': 1.0, + 'normalized': normalized, + } + + def test_check_output(self): + self.check_output() + + class TestIOU(unittest.TestCase): def test_iou(self): box1 = np.array([4.0, 3.0, 7.0, 5.0]).astype('float32') box2 = np.array([3.0, 4.0, 6.0, 8.0]).astype('float32') expt_output = np.array([2.0 / 16.0]).astype('float32') - calc_output = np.array([iou(box1, box2)]).astype('float32') + calc_output = np.array([iou(box1, box2, True)]).astype('float32') self.assertTrue(np.allclose(calc_output, expt_output)) From f660553d7781c065ef61d09ca136373d7c983f0f Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Fri, 18 Jan 2019 08:41:27 +0000 Subject: [PATCH 05/46] enhance nms for mask rcnn, test=develop --- paddle/fluid/operators/detection/multiclass_nms_op.cc | 3 +-- .../fluid/tests/unittests/test_multiclass_nms_op.py | 10 ++++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 680754dded..14ce9937dc 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -405,7 +405,7 @@ class MultiClassNMSKernel : public framework::OpKernel { if (num_kept == 0) { T* od = outs->mutable_data({1, 1}, ctx.GetPlace()); od[0] = -1; - batch_starts.back() = 1; + batch_starts = {0, 1}; } else { outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); if (score_dims.size() == 3) { @@ -443,7 +443,6 @@ class MultiClassNMSKernel : public framework::OpKernel { framework::LoD lod; lod.emplace_back(batch_starts); - LOG(ERROR) << "c++ lod: " << lod; outs->set_lod(lod); } diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index af36bcfaa0..2a50e0bd85 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -173,13 +173,15 @@ def lod_multiclass_nms(boxes, scores, background, score_threshold, normalized, shared=False) if nmsed_num == 0: - lod.append(1) + #lod.append(1) continue lod.append(nmsed_num) for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = box[idx, c, :] det_outs.append([c, score[idx][c], xmin, ymin, xmax, ymax]) + if len(lod) == 0: + lod.append(1) return det_outs, lod @@ -208,7 +210,7 @@ def batched_multiclass_nms(boxes, normalized, shared=True) if nmsed_num == 0: - lod.append(1) + # lod.append(1) continue lod.append(nmsed_num) @@ -221,7 +223,8 @@ def batched_multiclass_nms(boxes, sorted_det_out = sorted( tmp_det_out, key=lambda tup: tup[0], reverse=False) det_outs.extend(sorted_det_out) - + if len(lod) == 0: + lod += [1] return det_outs, lod @@ -259,7 +262,6 @@ class TestMulticlassNMSOp(OpTest): nmsed_outs, lod = batched_multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, nms_top_k, keep_top_k) - print('python lod: ', lod) nmsed_outs = [-1] if not nmsed_outs else nmsed_outs nmsed_outs = np.array(nmsed_outs).astype('float32') From af1cee5a3531093a035b74dca7b3dfdbce0c251b Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 18 Jan 2019 09:22:02 +0000 Subject: [PATCH 06/46] change in 1/18 --- python/paddle/fluid/imperative/nn.py | 56 +++++++++++++++---- .../fluid/tests/unittests/test_imperative.py | 6 +- 2 files changed, 46 insertions(+), 16 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 24f1865f3d..bf735e8f1a 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -257,12 +257,14 @@ class SimpleRNNCell(layers.Layer): output_size, param_attr, dtype=core.VarDesc.VarType.FP32): + super(SimpleRNNCell, self).__init__() self.input_size = step_input_size self.hidden_size = hidden_size self.output_size = output_size self._dype = core.VarDesc.VarType.FP32 from ..layer_helper import LayerHelper - self._helper = LayerHelper('SimpleRNNCell', param_attr=param_attr) + self._helper = LayerHelper( + 'SimpleRNNCell', act="tanh", param_attr=param_attr) def _build_once(self, inputs): i2h_param_shape = [self.step_input_size, self.hidden_size] @@ -284,20 +286,50 @@ class SimpleRNNCell(layers.Layer): dtype=self._dtype, is_bias=False) - def forward(self, inputs): - input = inputs[0] - pre_hidden = inputs[1] - out = self._helper.create_variable_for_type_inference(self._dtype) - hidden = self._helper.create_variable_for_type_inference(self._dype) + def forward(self, input, pre_hidden): + tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype) + tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype) + hidden = self._helper.create_variable_for_type_inference(self._dype) + out = self._helper.create_variable_for_type_inference(self._dype) + softmax_out = self._helper.create_variable_for_type_inference( + self._dtype) self._helper.append_op( type="mul", inputs={"X": input, - "Y": self._w}, + "Y": self._i2h_w}, + outputs={"Out": tmp_i2h}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type="mul", + inputs={"X": pre_hidden, + "Y": self._h2h_w}, + outputs={"Out": tmp_h2h}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type='sum', + inputs={'X': [tmp_i2h, tmp_h2h]}, + outputs={'Out': hidden}, + attrs={'use_mkldnn': False}) + + hidden = self._helper.append_activation(hidden) + + self._helper.append_op( + type="mul", + inputs={"X": hidden, + "Y": self._h2o_w}, outputs={"Out": out}, - attrs={ - "x_num_col_dims": self._num_flatten_dims, - "y_num_col_dims": 1 - }) + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type="softmax", + inputs={"X": out}, + outputs={"Out": softmax_out}, + attrs={"use_cudnn": False}) - return 1 + return softmax_out, hidden diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index a578867a3d..3c9893bdda 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -19,7 +19,7 @@ import sys import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.imperative.nn import FC +from paddle.fluid.imperative.nn import FC, SimpleRNNCell from test_imperative_base import new_program_scope @@ -70,9 +70,7 @@ class SimpleRNN(fluid.imperative.Layer): def __init__(self, inputs): super(SimpleRNN, self).__init__() self.seq_len = input.shape[0] - self._fc1 = FC(3, - fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=0.1))) + self.cell = SimpleRNNCell(input.shape[1], out) def forward(self, inputs): for i in range(self.seq_len): From b62a17bbae254c0b96169cab0129dd942ff19083 Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Fri, 18 Jan 2019 10:01:47 +0000 Subject: [PATCH 07/46] add nms api --- .../operators/detection/multiclass_nms_op.cc | 8 ++--- python/paddle/fluid/layers/detection.py | 35 +++++++++++++++++++ python/paddle/fluid/tests/test_detection.py | 11 ++++++ 3 files changed, 50 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 14ce9937dc..c61e3e1338 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -458,7 +458,8 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "predicted locations of M bounding bboxes, N is the batch size. " "Each bounding box has four coordinate values and the layout is " "[xmin, ymin, xmax, ymax], when box size equals to 4." - "2. (LoDTensor) A 3-D Tensor with shape [N, M, 4]"); + "2. (LoDTensor) A 3-D Tensor with shape [N, M, 4]" + "N is the number of boxes, M is the class number"); AddInput("Scores", "Two types of scores are supported:" "1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the " @@ -467,8 +468,7 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "there are total M scores which corresponding M bounding boxes. " " Please note, M is equal to the 1st dimension of BBoxes. " "2. (LoDTensor) A 2-D LoDTensor with shape" - "[N, num_class]. N is the number of bbox and" - "M represents the scores of bboxes in each class."); + "[N, num_class]. N is the number of bbox"); AddAttr( "background_label", "(int, defalut: 0) " @@ -497,7 +497,7 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "Number of total bboxes to be kept per image after NMS " "step. -1 means keeping all bboxes after NMS step."); AddAttr("normalized", - "(bool, default false) " + "(bool, default true) " "Whether detections are normalized.") .SetDefault(true); AddOutput("Out", diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 8aed97dc59..e8ce0c1d90 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -48,6 +48,7 @@ __all__ = [ 'box_coder', 'polygon_box_transform', 'yolov3_loss', + 'multiclass_nms', ] @@ -1810,3 +1811,37 @@ def generate_proposals(scores, rpn_roi_probs.stop_gradient = True return rpn_rois, rpn_roi_probs + + +def multiclass_nms(bboxes, + scores, + score_threshold, + nms_top_k, + nms_threshold, + keep_top_k, + normalized=True, + nms_eta=1., + background_label=0): + """ + """ + helper = LayerHelper('multiclass_nms', **locals()) + + output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) + helper.append_op( + type="multiclass_nms", + inputs={'BBoxes': bboxes, + 'Scores': scores}, + attrs={ + 'background_label': background_label, + 'score_threshold': score_threshold, + 'nms_top_k': nms_top_k, + 'nms_threshold': nms_threshold, + 'nms_eta': nms_eta, + 'keep_top_k': keep_top_k, + 'nms_eta': nms_eta, + 'normalized': normalized + }, + outputs={'Out': output}) + output.stop_gradient = True + + return output diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index d99eaa0634..7736cfc2fb 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -401,5 +401,16 @@ class TestYoloDetection(unittest.TestCase): self.assertIsNotNone(loss) +class TestMulticlassNMS(unittest.TestCase): + def test_multiclass_nms(self): + program = Program() + with program_guard(program): + bboxes = layers.data( + name='bboxes', shape=[-1, 10, 4], dtype='float32') + scores = layers.data(name='scores', shape=[-1, 10], dtype='float32') + output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 0.7, 200) + self.assertIsNotNone(output) + + if __name__ == '__main__': unittest.main() From 7d0c5fafa9938f6eee7278ea8ea1a7aa9ad63021 Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Mon, 21 Jan 2019 06:34:06 +0000 Subject: [PATCH 08/46] add API spec, test=develop --- paddle/fluid/API.spec | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 50ffef72ba..1289c1e373 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -318,6 +318,7 @@ paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None)) paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'class_num', 'ignore_thresh', 'loss_weight_xy', 'loss_weight_wh', 'loss_weight_conf_target', 'loss_weight_conf_notarget', 'loss_weight_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None)) +paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'nms_threshold', 'keep_top_k', 'normalized', 'nms_eta', 'background_label'], varargs=None, keywords=None, defaults=(True, 1.0, 0)) paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)) paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)) paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) From e686818aed8056b131bced5e3f54aa283c9d8234 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Tue, 22 Jan 2019 09:12:56 +0000 Subject: [PATCH 09/46] simple RNN --- paddle/fluid/imperative/tracer.cc | 2 + python/paddle/fluid/imperative/nn.py | 60 +++++++++++----- .../fluid/tests/unittests/test_imperative.py | 70 +++++++++++++++++-- 3 files changed, 107 insertions(+), 25 deletions(-) diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index 2878f5be88..d7a17e1be7 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -28,6 +28,8 @@ void CreateGradOp(const framework::OpDesc& op_desc, .GradOpMaker()(op_desc, no_grad_set, grad_to_var, grad_sub_block); PADDLE_ENFORCE(grad_op_descs.size() == 1, "Only support 1 grad op now."); // TODO(panyx0718): Leak? + // TODO(marsyang1993): Change grad_op_desc pointer to + // vector to allow multi grad_op *grad_op_desc = grad_op_descs[0].release(); } diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index bf735e8f1a..583979b564 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -23,11 +23,7 @@ from ..framework import Variable, OpProtoHolder from ..param_attr import ParamAttr from ..initializer import Normal, Constant -__all__ = [ - 'Conv2D', - 'Pool2D', - 'FC', -] +__all__ = ['Conv2D', 'Pool2D', 'FC', 'SimpleRNNCell'] class Conv2D(layers.Layer): @@ -251,14 +247,9 @@ class FC(layers.Layer): class SimpleRNNCell(layers.Layer): - def __init__(self, - step_input_size, - hidden_size, - output_size, - param_attr, - dtype=core.VarDesc.VarType.FP32): + def __init__(self, step_input_size, hidden_size, output_size, param_attr): super(SimpleRNNCell, self).__init__() - self.input_size = step_input_size + self.step_input_size = step_input_size self.hidden_size = hidden_size self.output_size = output_size self._dype = core.VarDesc.VarType.FP32 @@ -266,7 +257,7 @@ class SimpleRNNCell(layers.Layer): self._helper = LayerHelper( 'SimpleRNNCell', act="tanh", param_attr=param_attr) - def _build_once(self, inputs): + def _build_once(self, inputs, pre_hidden): i2h_param_shape = [self.step_input_size, self.hidden_size] h2h_param_shape = [self.hidden_size, self.hidden_size] h2o_param_shape = [self.output_size, self.hidden_size] @@ -294,6 +285,7 @@ class SimpleRNNCell(layers.Layer): out = self._helper.create_variable_for_type_inference(self._dype) softmax_out = self._helper.create_variable_for_type_inference( self._dtype) + self._helper.append_op( type="mul", inputs={"X": input, @@ -301,7 +293,7 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_i2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - + print("mul op 1") self._helper.append_op( type="mul", inputs={"X": pre_hidden, @@ -309,15 +301,45 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_h2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - + print("mul op 2") self._helper.append_op( - type='sum', - inputs={'X': [tmp_i2h, tmp_h2h]}, + type="elementwise_add", + inputs={'X': tmp_h2h, + 'Y': tmp_i2h}, outputs={'Out': hidden}, - attrs={'use_mkldnn': False}) + attrs={'axis': -1, + 'use_mkldnn': False}) + print("elementwise op 1") + self._helper.append_op( + type='print', + inputs={'In': hidden}, + attrs={ + 'first_n': -1, + 'summarize': -1, + 'message': None or "", + 'print_tensor_name': True, + 'print_tensor_type': True, + 'print_tensor_shape': True, + 'print_tensor_lod': True, + 'print_phase': 'BOTH' + }) hidden = self._helper.append_activation(hidden) + self._helper.append_op( + type='print', + inputs={'In': hidden}, + attrs={ + 'first_n': -1, + 'summarize': -1, + 'message': None or "", + 'print_tensor_name': True, + 'print_tensor_type': True, + 'print_tensor_shape': True, + 'print_tensor_lod': True, + 'print_phase': 'BOTH' + }) + self._helper.append_op( type="mul", inputs={"X": hidden, @@ -325,11 +347,13 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": out}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) + print("mul op 3") self._helper.append_op( type="softmax", inputs={"X": out}, outputs={"Out": softmax_out}, attrs={"use_cudnn": False}) + print("softmax op 1") return softmax_out, hidden diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 3c9893bdda..2e097e12d2 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -19,7 +19,10 @@ import sys import paddle.fluid as fluid from paddle.fluid import core -from paddle.fluid.imperative.nn import FC, SimpleRNNCell +from paddle.fluid.imperative.nn import FC +from paddle.fluid.imperative.nn import SimpleRNNCell +from typing import List, Any, Tuple + from test_imperative_base import new_program_scope @@ -67,14 +70,34 @@ class MLP(fluid.imperative.Layer): class SimpleRNN(fluid.imperative.Layer): - def __init__(self, inputs): + def __init__(self): super(SimpleRNN, self).__init__() - self.seq_len = input.shape[0] - self.cell = SimpleRNNCell(input.shape[1], out) + self.seq_len = 4 + self._cell = SimpleRNNCell( + 3, + 3, + 3, + fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1))) def forward(self, inputs): + out = list() + pre_hiddens = list() + + init_hidden = fluid.layers.tensor.create_parameter( + attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=0.1)), + shape=[1, 3], + dtype='float32', + is_bias=False) + pre_hidden = init_hidden for i in range(self.seq_len): - x = self._fc1(inputs[i]) + input = fluid.layers.slice( + inputs, axes=[1], starts=[i], ends=[i + 1]) + input = fluid.layers.reshape(input, shape=[1, 3]) + pre_hidden, out_softmax = self._cell(input, pre_hidden) + out.append(out_softmax) + + return out, pre_hiddens class TestImperative(unittest.TestCase): @@ -207,8 +230,41 @@ class TestImperative(unittest.TestCase): self.assertTrue(np.allclose(dy_out, static_out)) self.assertTrue(np.allclose(dy_grad, static_grad)) - def test_rnn_ptb(self): - np_inp = np.arrary([]) + def test_rnn(self): + np_inp = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], + [10.0, 11.0, 12.0]]) + np_inp = np_inp.reshape((1, 4, 3)) + np_inp = np_inp.astype(np.float32) + # with fluid.imperative.guard(): + # var_inp = fluid.imperative.base.to_variable(np_inp) + # var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) + # simple_rnn = SimpleRNN() + # outs, pre_hiddens = simple_rnn.forward(var_inp) + # dy_out = outs[3]._numpy() + # outs[3]._backward() + # dy_grad = simple_rnn._cell._i2h_w._gradient() + # print("dy_grad is {}".format(dy_grad)) + + with new_program_scope(): + print("im here") + inp = fluid.layers.data( + name="inp", shape=[1, 4, 3], append_batch_size=False) + simple_rnn = SimpleRNN() + outs, pre_hiddens = simple_rnn(inp) + param_grads = fluid.backward.append_backward( + outs[3], + parameter_list=[ + simple_rnn._cell._i2h_w.name, simple_rnn._cell._h2h_w.name, + simple_rnn._cell._h2o_w.name + ]) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + # print("param_grads is : {} ".format(param_grads)) + static_out, static_grad = exe.run( + feed={inp.name: np_inp}, + fetch_list=[outs[3].name, param_grads[2][1].name]) + # self.assertTrue(np.allclose(dy_out, static_out)) + # self.assertTrue(np.allclose(dy_grad, static_grad)) if __name__ == '__main__': From e5a33062691deffc3c03ba02d5a76c8ba752f051 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Tue, 22 Jan 2019 12:06:34 +0000 Subject: [PATCH 10/46] test=develop, add simple rnn test --- python/paddle/fluid/imperative/nn.py | 64 +++++++++---------- .../fluid/tests/unittests/test_imperative.py | 49 +++++++------- 2 files changed, 55 insertions(+), 58 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index f48db9faa6..59db26824c 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -315,7 +315,8 @@ class SimpleRNNCell(layers.Layer): out = self._helper.create_variable_for_type_inference(self._dype) softmax_out = self._helper.create_variable_for_type_inference( self._dtype) - + reduce_out = self._helper.create_variable_for_type_inference( + self._dtype) self._helper.append_op( type="mul", inputs={"X": input, @@ -323,7 +324,7 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_i2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - print("mul op 1") + # print("mul op 1") self._helper.append_op( type="mul", inputs={"X": pre_hidden, @@ -331,7 +332,7 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_h2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - print("mul op 2") + # print("mul op 2") self._helper.append_op( type="elementwise_add", inputs={'X': tmp_h2h, @@ -339,35 +340,22 @@ class SimpleRNNCell(layers.Layer): outputs={'Out': hidden}, attrs={'axis': -1, 'use_mkldnn': False}) - print("elementwise op 1") - - self._helper.append_op( - type='print', - inputs={'In': hidden}, - attrs={ - 'first_n': -1, - 'summarize': -1, - 'message': None or "", - 'print_tensor_name': True, - 'print_tensor_type': True, - 'print_tensor_shape': True, - 'print_tensor_lod': True, - 'print_phase': 'BOTH' - }) + # print("elementwise op 1") + + # self._helper.append_op( + # type='print', + # inputs={'In': hidden}, + # attrs={ + # 'first_n': -1, + # 'summarize': -1, + # 'message': None or "", + # 'print_tensor_name': True, + # 'print_tensor_type': True, + # 'print_tensor_shape': True, + # 'print_tensor_lod': True, + # 'print_phase': 'BOTH' + # }) hidden = self._helper.append_activation(hidden) - self._helper.append_op( - type='print', - inputs={'In': hidden}, - attrs={ - 'first_n': -1, - 'summarize': -1, - 'message': None or "", - 'print_tensor_name': True, - 'print_tensor_type': True, - 'print_tensor_shape': True, - 'print_tensor_lod': True, - 'print_phase': 'BOTH' - }) self._helper.append_op( type="mul", @@ -376,13 +364,21 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": out}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - print("mul op 3") + # print("mul op 3") self._helper.append_op( type="softmax", inputs={"X": out}, outputs={"Out": softmax_out}, attrs={"use_cudnn": False}) - print("softmax op 1") + # print("softmax op 1") - return softmax_out, hidden + self._helper.append_op( + type='reduce_sum', + inputs={'X': softmax_out}, + outputs={'Out': reduce_out}, + attrs={'dim': None, + 'keep_dim': False, + 'reduce_all': True}) + # print("reduce_sum op 1") + return reduce_out, hidden diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 2e097e12d2..6ec3a4620e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -80,7 +80,7 @@ class SimpleRNN(fluid.imperative.Layer): fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1))) def forward(self, inputs): - out = list() + outs = list() pre_hiddens = list() init_hidden = fluid.layers.tensor.create_parameter( @@ -94,10 +94,10 @@ class SimpleRNN(fluid.imperative.Layer): input = fluid.layers.slice( inputs, axes=[1], starts=[i], ends=[i + 1]) input = fluid.layers.reshape(input, shape=[1, 3]) - pre_hidden, out_softmax = self._cell(input, pre_hidden) - out.append(out_softmax) + out_softmax, pre_hidden = self._cell(input, pre_hidden) + outs.append(out_softmax) - return out, pre_hiddens + return outs, pre_hiddens class TestImperative(unittest.TestCase): @@ -235,15 +235,17 @@ class TestImperative(unittest.TestCase): [10.0, 11.0, 12.0]]) np_inp = np_inp.reshape((1, 4, 3)) np_inp = np_inp.astype(np.float32) - # with fluid.imperative.guard(): - # var_inp = fluid.imperative.base.to_variable(np_inp) - # var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) - # simple_rnn = SimpleRNN() - # outs, pre_hiddens = simple_rnn.forward(var_inp) - # dy_out = outs[3]._numpy() - # outs[3]._backward() - # dy_grad = simple_rnn._cell._i2h_w._gradient() - # print("dy_grad is {}".format(dy_grad)) + with fluid.imperative.guard(): + var_inp = fluid.imperative.base.to_variable(np_inp) + var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) + simple_rnn = SimpleRNN() + outs, pre_hiddens = simple_rnn.forward(var_inp) + dy_out = outs[3]._numpy() + outs[3]._backward() + dy_grad_h2o = simple_rnn._cell._h2o_w._gradient() + dy_grad_h2h = simple_rnn._cell._h2h_w._gradient() + dy_grad_i2h = simple_rnn._cell._i2h_w._gradient() + # print("dy_grad is {}".format(dy_grad)) with new_program_scope(): print("im here") @@ -251,20 +253,19 @@ class TestImperative(unittest.TestCase): name="inp", shape=[1, 4, 3], append_batch_size=False) simple_rnn = SimpleRNN() outs, pre_hiddens = simple_rnn(inp) - param_grads = fluid.backward.append_backward( - outs[3], - parameter_list=[ - simple_rnn._cell._i2h_w.name, simple_rnn._cell._h2h_w.name, - simple_rnn._cell._h2o_w.name - ]) + param_grads = fluid.backward.append_backward(outs[3]) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) - # print("param_grads is : {} ".format(param_grads)) - static_out, static_grad = exe.run( + static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run( feed={inp.name: np_inp}, - fetch_list=[outs[3].name, param_grads[2][1].name]) - # self.assertTrue(np.allclose(dy_out, static_out)) - # self.assertTrue(np.allclose(dy_grad, static_grad)) + fetch_list=[ + outs[3].name, param_grads[0][1].name, + param_grads[1][1].name, param_grads[2][1].name + ]) + self.assertTrue(np.allclose(dy_out, static_out)) + self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o)) + self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h)) + self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h)) if __name__ == '__main__': From 05bbe4e153186cb3f2ae2477157a5f5e2558e143 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Tue, 22 Jan 2019 12:32:57 +0000 Subject: [PATCH 11/46] test=develop, add simple rnn test --- python/paddle/fluid/imperative/nn.py | 23 +++---------------- .../fluid/tests/unittests/test_imperative.py | 9 ++++---- 2 files changed, 7 insertions(+), 25 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 59db26824c..d7d73df45f 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -324,7 +324,7 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_i2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - # print("mul op 1") + self._helper.append_op( type="mul", inputs={"X": pre_hidden, @@ -332,7 +332,7 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": tmp_h2h}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - # print("mul op 2") + self._helper.append_op( type="elementwise_add", inputs={'X': tmp_h2h, @@ -340,21 +340,6 @@ class SimpleRNNCell(layers.Layer): outputs={'Out': hidden}, attrs={'axis': -1, 'use_mkldnn': False}) - # print("elementwise op 1") - - # self._helper.append_op( - # type='print', - # inputs={'In': hidden}, - # attrs={ - # 'first_n': -1, - # 'summarize': -1, - # 'message': None or "", - # 'print_tensor_name': True, - # 'print_tensor_type': True, - # 'print_tensor_shape': True, - # 'print_tensor_lod': True, - # 'print_phase': 'BOTH' - # }) hidden = self._helper.append_activation(hidden) self._helper.append_op( @@ -364,14 +349,12 @@ class SimpleRNNCell(layers.Layer): outputs={"Out": out}, attrs={"x_num_col_dims": 1, "y_num_col_dims": 1}) - # print("mul op 3") self._helper.append_op( type="softmax", inputs={"X": out}, outputs={"Out": softmax_out}, attrs={"use_cudnn": False}) - # print("softmax op 1") self._helper.append_op( type='reduce_sum', @@ -380,5 +363,5 @@ class SimpleRNNCell(layers.Layer): attrs={'dim': None, 'keep_dim': False, 'reduce_all': True}) - # print("reduce_sum op 1") + return reduce_out, hidden diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 6ec3a4620e..0110a8dd47 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -245,7 +245,6 @@ class TestImperative(unittest.TestCase): dy_grad_h2o = simple_rnn._cell._h2o_w._gradient() dy_grad_h2h = simple_rnn._cell._h2h_w._gradient() dy_grad_i2h = simple_rnn._cell._i2h_w._gradient() - # print("dy_grad is {}".format(dy_grad)) with new_program_scope(): print("im here") @@ -262,10 +261,10 @@ class TestImperative(unittest.TestCase): outs[3].name, param_grads[0][1].name, param_grads[1][1].name, param_grads[2][1].name ]) - self.assertTrue(np.allclose(dy_out, static_out)) - self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o)) - self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h)) - self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h)) + self.assertTrue(np.allclose(dy_out, static_out)) + self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o)) + self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h)) + self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h)) if __name__ == '__main__': From 4a33a44f451a0e8d6b45ae66d499ea94bfa6642c Mon Sep 17 00:00:00 2001 From: fuchang01 Date: Tue, 22 Jan 2019 02:37:42 +0000 Subject: [PATCH 12/46] analyzer bert tester --- .../fluid/inference/tests/api/CMakeLists.txt | 5 + .../tests/api/analyzer_bert_tester.cc | 217 ++++++++++++++++++ 2 files changed, 222 insertions(+) create mode 100644 paddle/fluid/inference/tests/api/analyzer_bert_tester.cc diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 423c39813f..fa2e19bc4c 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -115,6 +115,11 @@ if (NOT EXISTS ${MOBILENET_INSTALL_DIR}) endif() inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) +# bert +set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert") +download_model_and_data(${BERT_INSTALL_DIR} "bert_model.tar.gz" "bert_data.txt.tar.gz") +inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc) + # resnet50 inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 "${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz" SERIAL) diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc new file mode 100644 index 0000000000..709d51388d --- /dev/null +++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc @@ -0,0 +1,217 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include +#include +#include "paddle/fluid/inference/api/paddle_inference_api.h" + +DEFINE_int32(repeat, 1, "repeat"); + +namespace paddle { +namespace inference { + +using paddle::PaddleTensor; +using paddle::contrib::AnalysisConfig; + +template +void GetValueFromStream(std::stringstream *ss, T *t) { + (*ss) >> (*t); +} + +template <> +void GetValueFromStream(std::stringstream *ss, std::string *t) { + *t = ss->str(); +} + +// Split string to vector +template +void Split(const std::string &line, char sep, std::vector *v) { + std::stringstream ss; + T t; + for (auto c : line) { + if (c != sep) { + ss << c; + } else { + GetValueFromStream(&ss, &t); + v->push_back(std::move(t)); + ss.str({}); + ss.clear(); + } + } + + if (!ss.str().empty()) { + GetValueFromStream(&ss, &t); + v->push_back(std::move(t)); + ss.str({}); + ss.clear(); + } +} + +template +constexpr paddle::PaddleDType GetPaddleDType(); + +template <> +constexpr paddle::PaddleDType GetPaddleDType() { + return paddle::PaddleDType::INT64; +} + +template <> +constexpr paddle::PaddleDType GetPaddleDType() { + return paddle::PaddleDType::FLOAT32; +} + +// Parse tensor from string +template +bool ParseTensor(const std::string &field, paddle::PaddleTensor *tensor) { + std::vector data; + Split(field, ':', &data); + if (data.size() < 2) return false; + + std::string shape_str = data[0]; + + std::vector shape; + Split(shape_str, ' ', &shape); + + std::string mat_str = data[1]; + + std::vector mat; + Split(mat_str, ' ', &mat); + + tensor->shape = shape; + auto size = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies()) * + sizeof(T); + tensor->data.Resize(size); + std::copy(mat.begin(), mat.end(), static_cast(tensor->data.data())); + tensor->dtype = GetPaddleDType(); + + return true; +} + +// Parse input tensors from string +bool ParseLine(const std::string &line, + std::vector *tensors) { + std::vector fields; + Split(line, ';', &fields); + + if (fields.size() < 5) return false; + + tensors->clear(); + tensors->reserve(5); + + int i = 0; + // src_id + paddle::PaddleTensor src_id; + ParseTensor(fields[i++], &src_id); + tensors->push_back(src_id); + + // pos_id + paddle::PaddleTensor pos_id; + ParseTensor(fields[i++], &pos_id); + tensors->push_back(pos_id); + + // segment_id + paddle::PaddleTensor segment_id; + ParseTensor(fields[i++], &segment_id); + tensors->push_back(segment_id); + + // self_attention_bias + paddle::PaddleTensor self_attention_bias; + ParseTensor(fields[i++], &self_attention_bias); + tensors->push_back(self_attention_bias); + + // next_segment_index + paddle::PaddleTensor next_segment_index; + ParseTensor(fields[i++], &next_segment_index); + tensors->push_back(next_segment_index); + + return true; +} + +// Print outputs to log +void PrintOutputs(const std::vector &outputs) { + LOG(INFO) << "example_id\tcontradiction\tentailment\tneutral"; + + for (size_t i = 0; i < outputs.front().data.length(); i += 3) { + LOG(INFO) << (i / 3) << "\t" + << static_cast(outputs.front().data.data())[i] << "\t" + << static_cast(outputs.front().data.data())[i + 1] + << "\t" + << static_cast(outputs.front().data.data())[i + 2]; + } +} + +bool LoadInputData(std::vector> *inputs) { + if (FLAGS_infer_data.empty()) { + LOG(ERROR) << "please set input data path"; + return false; + } + + std::ifstream fin(FLAGS_infer_data); + std::string line; + + int lineno = 0; + while (std::getline(fin, line)) { + std::vector feed_data; + if (!ParseLine(line, &feed_data)) { + LOG(ERROR) << "Parse line[" << lineno << "] error!"; + } else { + inputs->push_back(std::move(feed_data)); + } + } + + return true; +} + +void SetConfig(contrib::AnalysisConfig *config) { + config->SetModel(FLAGS_infer_model); +} + +void profile(bool use_mkldnn = false) { + contrib::AnalysisConfig config; + SetConfig(&config); + + if (use_mkldnn) { + config.EnableMKLDNN(); + } + + std::vector outputs; + std::vector> inputs; + LoadInputData(&inputs); + TestPrediction(reinterpret_cast(&config), + inputs, &outputs, FLAGS_num_threads); +} + +void compare(bool use_mkldnn = false) { + AnalysisConfig config; + SetConfig(&config); + + std::vector> inputs; + LoadInputData(&inputs); + CompareNativeAndAnalysis( + reinterpret_cast(&config), inputs); +} + +TEST(Analyzer_bert, profile) { profile(); } +#ifdef PADDLE_WITH_MKLDNN +TEST(Analyzer_bert, profile_mkldnn) { profile(true); } +#endif +} // namespace inference +} // namespace paddle From 33590b583f46a889a5071b8185b1b987559e5021 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Wed, 23 Jan 2019 03:19:34 +0000 Subject: [PATCH 13/46] test=develop, move simple rnn cell to test_imperative --- python/paddle/fluid/imperative/nn.py | 93 +----------------- .../fluid/tests/unittests/test_imperative.py | 94 ++++++++++++++++++- .../unittests/test_imperative_ptb_rnn.py | 21 +++++ 3 files changed, 113 insertions(+), 95 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index d7d73df45f..1bfeace521 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -23,7 +23,7 @@ from ..framework import Variable, OpProtoHolder from ..param_attr import ParamAttr from ..initializer import Normal, Constant -__all__ = ['Conv2D', 'Pool2D', 'FC', 'SimpleRNNCell'] +__all__ = ['Conv2D', 'Pool2D', 'FC'] class Conv2D(layers.Layer): @@ -274,94 +274,3 @@ class FC(layers.Layer): out = bias_out # add activation return self._helper.append_activation(out) - - -class SimpleRNNCell(layers.Layer): - def __init__(self, step_input_size, hidden_size, output_size, param_attr): - super(SimpleRNNCell, self).__init__() - self.step_input_size = step_input_size - self.hidden_size = hidden_size - self.output_size = output_size - self._dype = core.VarDesc.VarType.FP32 - from ..layer_helper import LayerHelper - self._helper = LayerHelper( - 'SimpleRNNCell', act="tanh", param_attr=param_attr) - - def _build_once(self, inputs, pre_hidden): - i2h_param_shape = [self.step_input_size, self.hidden_size] - h2h_param_shape = [self.hidden_size, self.hidden_size] - h2o_param_shape = [self.output_size, self.hidden_size] - self._i2h_w = self._helper.create_parameter( - attr=self._helper.param_attr, - shape=i2h_param_shape, - dtype=self._dtype, - is_bias=False) - self._h2h_w = self._helper.create_parameter( - attr=self._helper.param_attr, - shape=h2h_param_shape, - dtype=self._dtype, - is_bias=False) - self._h2o_w = self._helper.create_parameter( - attr=self._helper.param_attr, - shape=h2o_param_shape, - dtype=self._dtype, - is_bias=False) - - def forward(self, input, pre_hidden): - - tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype) - tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype) - hidden = self._helper.create_variable_for_type_inference(self._dype) - out = self._helper.create_variable_for_type_inference(self._dype) - softmax_out = self._helper.create_variable_for_type_inference( - self._dtype) - reduce_out = self._helper.create_variable_for_type_inference( - self._dtype) - self._helper.append_op( - type="mul", - inputs={"X": input, - "Y": self._i2h_w}, - outputs={"Out": tmp_i2h}, - attrs={"x_num_col_dims": 1, - "y_num_col_dims": 1}) - - self._helper.append_op( - type="mul", - inputs={"X": pre_hidden, - "Y": self._h2h_w}, - outputs={"Out": tmp_h2h}, - attrs={"x_num_col_dims": 1, - "y_num_col_dims": 1}) - - self._helper.append_op( - type="elementwise_add", - inputs={'X': tmp_h2h, - 'Y': tmp_i2h}, - outputs={'Out': hidden}, - attrs={'axis': -1, - 'use_mkldnn': False}) - hidden = self._helper.append_activation(hidden) - - self._helper.append_op( - type="mul", - inputs={"X": hidden, - "Y": self._h2o_w}, - outputs={"Out": out}, - attrs={"x_num_col_dims": 1, - "y_num_col_dims": 1}) - - self._helper.append_op( - type="softmax", - inputs={"X": out}, - outputs={"Out": softmax_out}, - attrs={"use_cudnn": False}) - - self._helper.append_op( - type='reduce_sum', - inputs={'X': softmax_out}, - outputs={'Out': reduce_out}, - attrs={'dim': None, - 'keep_dim': False, - 'reduce_all': True}) - - return reduce_out, hidden diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index 0110a8dd47..07693caddb 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -20,9 +20,6 @@ import sys import paddle.fluid as fluid from paddle.fluid import core from paddle.fluid.imperative.nn import FC -from paddle.fluid.imperative.nn import SimpleRNNCell -from typing import List, Any, Tuple - from test_imperative_base import new_program_scope @@ -69,6 +66,97 @@ class MLP(fluid.imperative.Layer): return x +class SimpleRNNCell(fluid.imperative.Layer): + def __init__(self, step_input_size, hidden_size, output_size, param_attr): + super(SimpleRNNCell, self).__init__() + self.step_input_size = step_input_size + self.hidden_size = hidden_size + self.output_size = output_size + self._dype = core.VarDesc.VarType.FP32 + from paddle.fluid.layer_helper import LayerHelper + self._helper = LayerHelper( + 'SimpleRNNCell', act="tanh", param_attr=param_attr) + + def _build_once(self, inputs, pre_hidden): + i2h_param_shape = [self.step_input_size, self.hidden_size] + h2h_param_shape = [self.hidden_size, self.hidden_size] + h2o_param_shape = [self.output_size, self.hidden_size] + self._i2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=i2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2h_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2h_param_shape, + dtype=self._dtype, + is_bias=False) + self._h2o_w = self._helper.create_parameter( + attr=self._helper.param_attr, + shape=h2o_param_shape, + dtype=self._dtype, + is_bias=False) + + def forward(self, input, pre_hidden): + + tmp_i2h = self._helper.create_variable_for_type_inference(self._dtype) + tmp_h2h = self._helper.create_variable_for_type_inference(self._dtype) + hidden = self._helper.create_variable_for_type_inference(self._dype) + out = self._helper.create_variable_for_type_inference(self._dype) + softmax_out = self._helper.create_variable_for_type_inference( + self._dtype) + reduce_out = self._helper.create_variable_for_type_inference( + self._dtype) + self._helper.append_op( + type="mul", + inputs={"X": input, + "Y": self._i2h_w}, + outputs={"Out": tmp_i2h}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type="mul", + inputs={"X": pre_hidden, + "Y": self._h2h_w}, + outputs={"Out": tmp_h2h}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type="elementwise_add", + inputs={'X': tmp_h2h, + 'Y': tmp_i2h}, + outputs={'Out': hidden}, + attrs={'axis': -1, + 'use_mkldnn': False}) + hidden = self._helper.append_activation(hidden) + + self._helper.append_op( + type="mul", + inputs={"X": hidden, + "Y": self._h2o_w}, + outputs={"Out": out}, + attrs={"x_num_col_dims": 1, + "y_num_col_dims": 1}) + + self._helper.append_op( + type="softmax", + inputs={"X": out}, + outputs={"Out": softmax_out}, + attrs={"use_cudnn": False}) + + self._helper.append_op( + type='reduce_sum', + inputs={'X': softmax_out}, + outputs={'Out': reduce_out}, + attrs={'dim': None, + 'keep_dim': False, + 'reduce_all': True}) + + return reduce_out, hidden + + class SimpleRNN(fluid.imperative.Layer): def __init__(self): super(SimpleRNN, self).__init__() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py new file mode 100644 index 0000000000..19df224770 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -0,0 +1,21 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest + +import paddle.fluid.framework as framework +import paddle.fluid.optimizer as optimizer +from paddle.fluid.backward import append_backward From cc534530576edba67064f821b6197edd01b8e23b Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Wed, 23 Jan 2019 05:20:20 +0000 Subject: [PATCH 14/46] add comment and refine code, test=develop --- paddle/fluid/API.spec | 2 +- paddle/fluid/operators/detection/bbox_util.h | 20 -- .../operators/detection/multiclass_nms_op.cc | 187 +++++++++--------- python/paddle/fluid/layers/detection.py | 82 +++++++- 4 files changed, 170 insertions(+), 121 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 1289c1e373..acf4e1ff10 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -318,7 +318,7 @@ paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None)) paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'class_num', 'ignore_thresh', 'loss_weight_xy', 'loss_weight_wh', 'loss_weight_conf_target', 'loss_weight_conf_notarget', 'loss_weight_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None)) -paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'nms_threshold', 'keep_top_k', 'normalized', 'nms_eta', 'background_label'], varargs=None, keywords=None, defaults=(True, 1.0, 0)) +paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'nms_threshold', 'keep_top_k', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(True, 1.0, 0, None)) paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)) paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)) paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) diff --git a/paddle/fluid/operators/detection/bbox_util.h b/paddle/fluid/operators/detection/bbox_util.h index 0270ca77f3..6abeca1da4 100644 --- a/paddle/fluid/operators/detection/bbox_util.h +++ b/paddle/fluid/operators/detection/bbox_util.h @@ -93,25 +93,5 @@ void BboxOverlaps(const framework::Tensor& r_boxes, } } -template -void SliceOneClass(const platform::DeviceContext& ctx, - const framework::Tensor& items, const int class_id, - framework::Tensor* one_class_item) { - T* item_data = one_class_item->mutable_data(ctx.GetPlace()); - const T* items_data = items.data(); - const int64_t num_item = items.dims()[0]; - const int class_num = items.dims()[1]; - int item_size = 1; - if (items.dims().size() == 3) { - item_size = items.dims()[2]; - } - for (int i = 0; i < num_item; ++i) { - for (int j = 0; j < item_size; ++j) { - item_data[i * item_size + j] = - items_data[i * class_num * item_size + class_id * item_size + j]; - } - } -} - } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index c61e3e1338..43d6382280 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -1,8 +1,11 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -10,7 +13,6 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/poly_util.h" namespace paddle { @@ -136,12 +138,9 @@ static inline T JaccardOverlap(const T* box1, const T* box2, const T inter_ymin = std::max(box1[1], box2[1]); const T inter_xmax = std::min(box1[2], box2[2]); const T inter_ymax = std::min(box1[3], box2[3]); - T inter_w = inter_xmax - inter_xmin; - T inter_h = inter_ymax - inter_ymin; - if (!normalized) { - inter_w += 1; - inter_h += 1; - } + T norm = normalized ? static_cast(0.) : static_cast(1.); + T inter_w = inter_xmax - inter_xmin + norm; + T inter_h = inter_ymax - inter_ymin + norm; const T inter_area = inter_w * inter_h; const T bbox1_area = BBoxArea(box1, normalized); const T bbox2_area = BBoxArea(box2, normalized); @@ -164,6 +163,25 @@ T PolyIoU(const T* box1, const T* box2, const size_t box_size, } } +template +void SliceOneClass(const platform::DeviceContext& ctx, + const framework::Tensor& items, const int class_id, + framework::Tensor* one_class_item) { + T* item_data = one_class_item->mutable_data(ctx.GetPlace()); + const T* items_data = items.data(); + const int64_t num_item = items.dims()[0]; + const int class_num = items.dims()[1]; + int item_size = 1; + if (items.dims().size() == 3) { + item_size = items.dims()[2]; + } + for (int i = 0; i < num_item; ++i) { + std::memcpy(item_data + i * item_size, + items_data + i * class_num * item_size + class_id * item_size, + sizeof(T) * item_size); + } +} + template class MultiClassNMSKernel : public framework::OpKernel { public: @@ -237,33 +255,26 @@ class MultiClassNMSKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); int num_det = 0; - int64_t box_num = 0, class_num = 0, predict_dim = 0; - if (scores_size == 3) { - class_num = scores.dims()[0]; - predict_dim = scores.dims()[1]; - for (int64_t c = 0; c < class_num; ++c) { - if (c == background_label) continue; - Tensor score = scores.Slice(c, c + 1); - NMSFast(bboxes, score, score_threshold, nms_threshold, nms_eta, - nms_top_k, &((*indices)[c]), normalized); - num_det += (*indices)[c].size(); + + int64_t class_num = scores_size == 3 ? scores.dims()[0] : scores.dims()[1]; + Tensor bbox_slice, score_slice; + for (int64_t c = 0; c < class_num; ++c) { + if (c == background_label) continue; + if (scores_size == 3) { + score_slice = scores.Slice(c, c + 1); + bbox_slice = bboxes; + } else { + score_slice.Resize({scores.dims()[0], 1}); + bbox_slice.Resize({scores.dims()[0], 4}); + SliceOneClass(dev_ctx, scores, c, &score_slice); + SliceOneClass(dev_ctx, bboxes, c, &bbox_slice); } - } else { - box_num = scores.dims()[0]; - class_num = scores.dims()[1]; - Tensor score; - score.Resize({box_num, 1}); - Tensor bbox; - bbox.Resize({box_num, 4}); - for (int64_t c = 0; c < class_num; ++c) { - if (c == background_label) continue; - SliceOneClass(dev_ctx, scores, c, &score); - SliceOneClass(dev_ctx, bboxes, c, &bbox); - NMSFast(bbox, score, score_threshold, nms_threshold, nms_eta, nms_top_k, - &((*indices)[c]), normalized); + NMSFast(bbox_slice, score_slice, score_threshold, nms_threshold, nms_eta, + nms_top_k, &((*indices)[c]), normalized); + if (scores_size == 2) { std::stable_sort((*indices)[c].begin(), (*indices)[c].end()); - num_det += (*indices)[c].size(); } + num_det += (*indices)[c].size(); } *num_nmsed_out = num_det; @@ -274,12 +285,11 @@ class MultiClassNMSKernel : public framework::OpKernel { for (const auto& it : *indices) { int label = it.first; if (scores_size == 3) { - sdata = scores_data + label * predict_dim; + sdata = scores_data + label * scores.dims()[1]; } else { - Tensor score; - score.Resize({box_num, 1}); - SliceOneClass(dev_ctx, scores, label, &score); - sdata = score.data(); + score_slice.Resize({scores.dims()[0], 1}); + SliceOneClass(dev_ctx, scores, label, &score_slice); + sdata = score_slice.data(); } const std::vector& label_indices = it.second; for (size_t j = 0; j < label_indices.size(); ++j) { @@ -362,43 +372,33 @@ class MultiClassNMSKernel : public framework::OpKernel { auto* outs = ctx.Output("Out"); auto score_dims = scores->dims(); - int64_t class_num = score_dims[1]; + auto score_size = score_dims.size(); auto& dev_ctx = ctx.template device_context(); std::vector>> all_indices; std::vector batch_starts = {0}; int64_t batch_size = score_dims[0]; - int64_t predict_dim = 0; int64_t box_dim = boxes->dims()[2]; int64_t out_dim = box_dim + 2; int num_nmsed_out = 0; - if (score_dims.size() == 3) { - predict_dim = score_dims[2]; - for (int64_t i = 0; i < batch_size; ++i) { - Tensor ins_score = scores->Slice(i, i + 1); - ins_score.Resize({class_num, predict_dim}); - - Tensor ins_boxes = boxes->Slice(i, i + 1); - ins_boxes.Resize({predict_dim, box_dim}); - - std::map> indices; - MultiClassNMS(ctx, ins_score, ins_boxes, score_dims.size(), &indices, - &num_nmsed_out); - all_indices.push_back(indices); - batch_starts.push_back(batch_starts.back() + num_nmsed_out); - } - } else { - auto boxes_lod = boxes->lod().back(); - int64_t n = static_cast(boxes_lod.size() - 1); - for (int i = 0; i < n; ++i) { - Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); - Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); - std::map> indices; - MultiClassNMS(ctx, scores_slice, boxes_slice, score_dims.size(), - &indices, &num_nmsed_out); - all_indices.push_back(indices); - batch_starts.push_back(batch_starts.back() + num_nmsed_out); + Tensor boxes_slice, scores_slice; + int n = score_size == 3 ? batch_size : boxes->lod().back().size() - 1; + for (int i = 0; i < n; ++i) { + if (score_size == 3) { + scores_slice = scores->Slice(i, i + 1); + scores_slice.Resize({score_dims[1], score_dims[2]}); + boxes_slice = boxes->Slice(i, i + 1); + boxes_slice.Resize({score_dims[2], box_dim}); + } else { + auto boxes_lod = boxes->lod().back(); + scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); } + std::map> indices; + MultiClassNMS(ctx, scores_slice, boxes_slice, score_size, &indices, + &num_nmsed_out); + all_indices.push_back(indices); + batch_starts.push_back(batch_starts.back() + num_nmsed_out); } int num_kept = batch_starts.back(); @@ -408,35 +408,23 @@ class MultiClassNMSKernel : public framework::OpKernel { batch_starts = {0, 1}; } else { outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); - if (score_dims.size() == 3) { - for (int64_t i = 0; i < batch_size; ++i) { - Tensor ins_score = scores->Slice(i, i + 1); - ins_score.Resize({class_num, predict_dim}); - - Tensor ins_boxes = boxes->Slice(i, i + 1); - ins_boxes.Resize({predict_dim, box_dim}); - - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; - if (e > s) { - Tensor out = outs->Slice(s, e); - MultiClassOutput(dev_ctx, ins_score, ins_boxes, all_indices[i], - score_dims.size(), &out); - } + for (int i = 0; i < n; ++i) { + if (score_size == 3) { + scores_slice = scores->Slice(i, i + 1); + boxes_slice = boxes->Slice(i, i + 1); + scores_slice.Resize({score_dims[1], score_dims[2]}); + boxes_slice.Resize({score_dims[2], box_dim}); + } else { + auto boxes_lod = boxes->lod().back(); + scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); + boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); } - } else { - auto boxes_lod = boxes->lod().back(); - int64_t n = static_cast(boxes_lod.size() - 1); - for (int i = 0; i < n; ++i) { - Tensor boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); - Tensor scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; - if (e > s) { - Tensor out = outs->Slice(s, e); - MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i], - score_dims.size(), &out); - } + int64_t s = batch_starts[i]; + int64_t e = batch_starts[i + 1]; + if (e > s) { + Tensor out = outs->Slice(s, e); + MultiClassOutput(dev_ctx, scores_slice, boxes_slice, all_indices[i], + score_dims.size(), &out); } } } @@ -458,17 +446,18 @@ class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { "predicted locations of M bounding bboxes, N is the batch size. " "Each bounding box has four coordinate values and the layout is " "[xmin, ymin, xmax, ymax], when box size equals to 4." - "2. (LoDTensor) A 3-D Tensor with shape [N, M, 4]" - "N is the number of boxes, M is the class number"); + "2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]" + "M is the number of bounding boxes, C is the class number"); AddInput("Scores", "Two types of scores are supported:" "1. (Tensor) A 3-D Tensor with shape [N, C, M] represents the " "predicted confidence predictions. N is the batch size, C is the " "class number, M is number of bounding boxes. For each category " "there are total M scores which corresponding M bounding boxes. " - " Please note, M is equal to the 1st dimension of BBoxes. " - "2. (LoDTensor) A 2-D LoDTensor with shape" - "[N, num_class]. N is the number of bbox"); + " Please note, M is equal to the 2nd dimension of BBoxes. " + "2. (LoDTensor) A 2-D LoDTensor with shape [M, C]. " + "M is the number of bbox, C is the class number. In this case, " + "Input BBoxes should be the second case with shape [M, C, 4]."); AddAttr( "background_label", "(int, defalut: 0) " @@ -528,8 +517,8 @@ independently for each class. The outputs is a 2-D LoDTenosr, for each image, the offsets in first dimension of LoDTensor are called LoD, the number of offset is N + 1, where N is the batch size. If LoD[i + 1] - LoD[i] == 0, means there is no detected bbox for this image. If there is no detected boxes -for all images, all the elements in LoD are 0, and the Out only contains one -value which is -1. +for all images, all the elements in LoD are set to {0,1}, and the Out only +contains one value which is -1. )DOC"); } }; diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index e8ce0c1d90..3d0896850e 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -1821,8 +1821,88 @@ def multiclass_nms(bboxes, keep_top_k, normalized=True, nms_eta=1., - background_label=0): + background_label=0, + name=None): """ + **Multiclass NMS** + + This operator is to do multi-class non maximum suppression (NMS) on + boxes and scores. + + In the NMS step, this operator greedily selects a subset of detection bounding + boxes that have high scores larger than score_threshold, if providing this + threshold, then selects the largest nms_top_k confidences scores if nms_top_k + is larger than -1. Then this operator pruns away boxes that have high IOU + (intersection over union) overlap with already selected boxes by adaptive + threshold NMS based on parameters of nms_threshold and nms_eta. + + Aftern NMS step, at most keep_top_k number of total bboxes are to be kept + per image if keep_top_k is larger than -1. + + Args: + bboxes (Variable): Two types of bboxes are supported: + 1. (Tensor) A 3-D Tensor with shape + [N, M, 4 or 8 16 24 32] represents the + predicted locations of M bounding bboxes, + N is the batch size. Each bounding box has four + coordinate values and the layout is + [xmin, ymin, xmax, ymax], when box size equals to 4. + 2. (LoDTensor) A 3-D Tensor with shape [M, C, 4] + M is the number of bounding boxes, C is the + class number + scores (Variable): Two types of scores are supported: + 1. (Tensor) A 3-D Tensor with shape [N, C, M] + represents the predicted confidence predictions. + N is the batch size, C is the class number, M is + number of bounding boxes. For each category there + are total M scores which corresponding M bounding + boxes. Please note, M is equal to the 2nd dimension + of BBoxes. + 2. (LoDTensor) A 2-D LoDTensor with shape [M, C]. + M is the number of bbox, C is the class number. + In this case, input BBoxes should be the second + case with shape [M, C, 4]. + background_label (int): The index of background label, the background + label will be ignored. If set to -1, then all + categories will be considered. Default: 0 + score_threshold (float): Threshold to filter out bounding boxes with + low confidence score. If not provided, + consider all boxes. + nms_top_k (int): Maximum number of detections to be kept according to + the confidences aftern the filtering detections based + on score_threshold. + nms_threshold (float): The threshold to be used in NMS. Default: 0.3 + nms_eta (float): The threshold to be used in NMS. Default: 1.0 + keep_top_k (int): Number of total bboxes to be kept per image after NMS + step. -1 means keeping all bboxes after NMS step. + normalized (bool): Whether detections are normalized. Default: True + name(str): Name of the multiclass nms op. Default: None. + + Returns: + Out: A 2-D LoDTensor with shape [No, 6] represents the detections. + Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax] + or A 2-D LoDTensor with shape [No, 10] represents the detections. + Each row has 10 values: + [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the + total number of detections. If there is no detected boxes for all + images, lod will be set to {0, 1} and Out only contains one value + which is -1. + + Examples: + .. code-block:: python + + boxes = fluid.layers.data(name='bboxes', shape=[81, 4], + dtype='float32', lod_level=1) + scores = fluid.layers.data(name='scores', shape=[81], + dtype='float32', lod_level=1) + out = fluid.layers.multiclass_nms(bboxes=boxes, + scores=scores, + background_label=0, + score_threshold=0.5, + nms_top_k=400, + nms_threshold=0.3, + keep_top_k=200, + normalized=False) """ helper = LayerHelper('multiclass_nms', **locals()) From 353b5f06a768aad47564b2d37c1aac408fe35ce3 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Wed, 23 Jan 2019 16:22:17 +0800 Subject: [PATCH 15/46] refine analyzer_bert_test to pass the ci test=develop --- .../tests/api/analyzer_bert_tester.cc | 69 +++++++++++++------ 1 file changed, 47 insertions(+), 22 deletions(-) diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc index 709d51388d..aced71b774 100644 --- a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc @@ -12,17 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include -#include -#include -#include -#include -#include -#include -#include -#include "paddle/fluid/inference/api/paddle_inference_api.h" - -DEFINE_int32(repeat, 1, "repeat"); +#include "paddle/fluid/inference/tests/api/tester_helper.h" namespace paddle { namespace inference { @@ -166,16 +156,17 @@ bool LoadInputData(std::vector> *inputs) { std::ifstream fin(FLAGS_infer_data); std::string line; + int sample = 0; - int lineno = 0; + // The unit-test dataset only have 10 samples, each sample have 5 feeds. while (std::getline(fin, line)) { std::vector feed_data; - if (!ParseLine(line, &feed_data)) { - LOG(ERROR) << "Parse line[" << lineno << "] error!"; - } else { - inputs->push_back(std::move(feed_data)); - } + ParseLine(line, &feed_data); + inputs->push_back(std::move(feed_data)); + sample++; + if (!FLAGS_test_all_data && sample == FLAGS_batch_size) break; } + LOG(INFO) << "number of samples: " << sample; return true; } @@ -199,19 +190,53 @@ void profile(bool use_mkldnn = false) { inputs, &outputs, FLAGS_num_threads); } +TEST(Analyzer_bert, profile) { profile(); } +#ifdef PADDLE_WITH_MKLDNN +TEST(Analyzer_bert, profile_mkldnn) { profile(true); } +#endif + +// Check the fuse status +TEST(Analyzer_bert, fuse_statis) { + AnalysisConfig cfg; + SetConfig(&cfg); + int num_ops; + auto predictor = CreatePaddlePredictor(cfg); + auto fuse_statis = GetFuseStatis( + static_cast(predictor.get()), &num_ops); + LOG(INFO) << "num_ops: " << num_ops; +} + +// Compare result of NativeConfig and AnalysisConfig void compare(bool use_mkldnn = false) { - AnalysisConfig config; - SetConfig(&config); + AnalysisConfig cfg; + SetConfig(&cfg); + if (use_mkldnn) { + cfg.EnableMKLDNN(); + } std::vector> inputs; LoadInputData(&inputs); CompareNativeAndAnalysis( - reinterpret_cast(&config), inputs); + reinterpret_cast(&cfg), inputs); } -TEST(Analyzer_bert, profile) { profile(); } +TEST(Analyzer_bert, compare) { compare(); } #ifdef PADDLE_WITH_MKLDNN -TEST(Analyzer_bert, profile_mkldnn) { profile(true); } +TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); } #endif + +// Compare Deterministic result +// TODO(luotao): Since each unit-test on CI only have 10 minutes, cancel this to +// decrease the CI time. +// TEST(Analyzer_bert, compare_determine) { +// AnalysisConfig cfg; +// SetConfig(&cfg); +// +// std::vector> inputs; +// LoadInputData(&inputs); +// CompareDeterministic(reinterpret_cast(&cfg), +// inputs); +// } } // namespace inference } // namespace paddle From ac80273686629fe3fb576d7cf8dd981f0a146a1b Mon Sep 17 00:00:00 2001 From: minqiyang Date: Wed, 23 Jan 2019 19:13:15 +0800 Subject: [PATCH 16/46] Change definitions to PADDLE_WITH_JEMALLOC --- CMakeLists.txt | 2 +- paddle/fluid/memory/allocation/legacy_allocator.cc | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d6aa8f1b85..b3111eed8b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -265,7 +265,7 @@ endif() if (WITH_JEMALLOC) find_package(JeMalloc REQUIRED) include_directories(${JEMALLOC_INCLUDE_DIR}) - add_definitions(-DWITH_JEMALLOC) + add_definitions(-DPADDLE_WITH_JEMALLOC) endif() include(generic) # simplify cmake module diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index cf6d351a41..04a68d6c23 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -17,7 +17,7 @@ #include #include -#ifdef WITH_JEMALLOC +#ifdef PADDLE_WITH_JEMALLOC #include #endif @@ -95,7 +95,7 @@ struct NaiveAllocator { template <> void *Alloc(const platform::CPUPlace &place, size_t size) { VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); -#ifdef WITH_JEMALLOC +#ifdef PADDLE_WITH_JEMALLOC void *p = malloc(size); #else void *p = GetCPUBuddyAllocator()->Alloc(size); @@ -110,7 +110,7 @@ void *Alloc(const platform::CPUPlace &place, size_t size) { template <> void Free(const platform::CPUPlace &place, void *p) { VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); -#ifdef WITH_JEMALLOC +#ifdef PADDLE_WITH_JEMALLOC free(p); #else GetCPUBuddyAllocator()->Free(p); @@ -119,8 +119,8 @@ void Free(const platform::CPUPlace &place, void *p) { template <> size_t Used(const platform::CPUPlace &place) { -#ifdef WITH_JEMALLOC - // fake the result of used memory when WITH_JEMALLOC is ON +#ifdef PADDLE_WITH_JEMALLOC + // fake the result of used memory when PADDLE_WITH_JEMALLOC is ON return 0U; #else return GetCPUBuddyAllocator()->Used(); From cddecad701939936b62f1c0f44edf077d04d8232 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Wed, 23 Jan 2019 12:17:16 +0000 Subject: [PATCH 17/46] test=develop, add embeding to layers and add ptb_rnn in imperative test --- python/paddle/fluid/imperative/nn.py | 52 ++++- .../unittests/test_imperative_ptb_rnn.py | 196 +++++++++++++++++- 2 files changed, 246 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 1bfeace521..381fc4ef15 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -23,7 +23,7 @@ from ..framework import Variable, OpProtoHolder from ..param_attr import ParamAttr from ..initializer import Normal, Constant -__all__ = ['Conv2D', 'Pool2D', 'FC'] +__all__ = ['Conv2D', 'Pool2D', 'FC', 'EMBEDDING'] class Conv2D(layers.Layer): @@ -274,3 +274,53 @@ class FC(layers.Layer): out = bias_out # add activation return self._helper.append_activation(out) + + +class EMBEDDING(layers.Layer): + def __init__(self, + size, + is_sparse=False, + is_distributed=False, + padding_idx=None, + param_attr=None, + dtype='float32'): + + super(EMBEDDING, self).__init__() + self._size = size + self._is_sparse = is_sparse + self._is_distributed = is_distributed + + self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( + size[0] + padding_idx) + + self._param_attr = param_attr + self._dtype = dtype + self._remote_prefetch = self.is_sparse and (not self.is_distributed) + if self._remote_prefetch: + assert self._is_sparse is True and self._is_distributed is False + + from ..layer_helper import LayerHelper + self._helper = LayerHelper('embedding', param_attr=param_attr) + + def _build_once(self, input): + self._w = self._helper.create_parameter( + attr=self._param_attr, + shape=self._size, + dtype=self._dtype, + is_bias=False) + + def forward(self, input): + out = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type='lookup_table', + inputs={'Ids': input, + 'W': self._w}, + outputs={'Out': out}, + attrs={ + 'is_sparse': self._is_sparse, + 'is_distributed': self._is_distributed, + 'remote_prefetch': self._remote_prefetch, + 'padding_idx': self._padding_idx + }) + + return out diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 19df224770..ecd52c8b80 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -15,7 +15,201 @@ from __future__ import print_function import unittest - +import paddle.fluid as fluid +from paddle.fluid.imperative.nn import EMBEDDING import paddle.fluid.framework as framework import paddle.fluid.optimizer as optimizer from paddle.fluid.backward import append_backward + + +class SimpleLSTMRNN(fluid.imperative.Layer): + def __init__(self, hidden_size, num_layers=2, init_scale=0.1, dropout=None): + self._hidden_size = hidden_size + self._num_layers = num_layers + self._init_scale = init_scale + self._dropout = dropout + self.input = None + + def _build_once(self, + input_embedding, + seq_len, + init_hidden=None, + init_cell=None): + self.weight_1_arr = [] + self.weight_2_arr = [] + self.bias_arr = [] + self.hidden_array = [] + self.cell_array = [] + self.mask_array = [] + + for i in range(self._num_layers): + weight_1 = fluid.layers.create_parameter( + shape=[self._hidden_size * 2, self._hidden_size * 4], + dtype="float32", + name="fc_weight1_" + str(i), + default_initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)) + self.weight_1_arr.append(weight_1) + bias_1 = fluid.layers.create_parameter( + [self._hidden_size * 4], + dtype="float32", + name="fc_bias1_" + str(i), + default_initializer=fluid.initializer.Constant(0.0)) + self.bias_arr.append(bias_1) + + pre_hidden = self.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1]) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1]) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size]) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size]) + fluid.hidden_array.append(pre_hidden) + fluid.cell_array.append(pre_cell) + + def forward(self, + input_embedding, + seq_len, + init_hidden=None, + init_cell=None): + res = [] + for index in range(seq_len): + self.input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1]) + self.input = fluid.layers.reshape( + self.input, shape=[-1, self._hidden_size]) + for k in range(self._num_layers): + pre_hidden = self.hidden_array[k] + pre_cell = self.cell_array[k] + weight_1 = self.weight_1_arr[k] + bias = self.bias_arr[k] + + nn = fluid.layers.concat([self.input, pre_hidden], 1) + gate_input = fluid.layers.matmul(x=nn, y=weight_1) + + gate_input = fluid.layers.elementwise_add(gate_input, bias) + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1) + + c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( + i) * fluid.layers.tanh(j) + m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) + + self.hidden_array[k] = m + self.cell_array[k] = c + self.input = m + + if self.dropout is not None and self.dropout > 0.0: + self.input = fluid.layers.dropout( + self.input, + dropout_prob=self.dropout, + dropout_implementation='upscale_in_train') + + res.append( + fluid.layers.reshape( + input, shape=[1, -1, self._hidden_size])) + real_res = fluid.layers.concat(res, 0) + real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) + last_hidden = fluid.layers.concat(self.hidden_array, 1) + last_hidden = fluid.layers.reshape( + last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) + last_cell = fluid.layers.concat(self.cell_array, 1) + last_cell = fluid.layers.reshape( + last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) + + return real_res, last_hidden, last_cell + + +class PtbModel(fluid.imperative.Layer): + def __init__(self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None): + super(PtbModel, self).__init__() + self.hidden_size = hidden_size + self.vocab_size = vocab_size + self.init_scale = init_scale + self.num_layers = num_layers + self.num_steps = num_steps + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout) + self.embedding = EMBEDDING( + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False, + param_attr=fluid.ParamAttr( + name='embedding_para', + initializer=fluid.initializer.UniformInitializer( + low=-init_scale, high=init_scale))) + + def _build_once(self, input, label, init_hidden, init_cell): + self.softmax_weight = fluid.layers.create_parameter( + [self._hidden_size, self._vocab_size], + dtype="float32", + name="softmax_weight", + default_initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)) + self.softmax_bias = fluid.layers.create_parameter( + [self._vocab_size], + dtype="float32", + name='softmax_bias', + default_initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)) + + def forward(self, input, label, init_hidden, init_cell): + init_h = fluid.layers.reshape( + init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_c = fluid.layers.reshape( + init_cell, shape=[self.num_layers, -1, self.hidden_size]) + + x_emb = self.embedding(input) + x_emb = fluid.layers.reshape( + x_emb, shape=[-1, self.num_steps, self.hidden_size]) + if self.dropout is not None and self.dropout > 0.0: + x_emb = fluid.layers.dropout( + x_emb, + dropout_prob=self.drop_out, + dropout_implementation='upscale_in_train') + rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h, + init_c) + rnn_out = fluid.layers.reshape( + rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + projection = fluid.layers.reshape(rnn_out, self.softmax_weight) + projection = fluid.layers.elementwise_add(projection, self.softmax_bias) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size]) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size]) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False) + loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) + loss = fluid.layers.reduce_mean(loss, dim=[0]) + loss = fluid.layers.reduce_sum(loss) + loss.permissions = True + + return loss, last_hidden, last_cell + + +class TestImperativePtbRnn(unittest.TestCase): + def test_mnist_cpu_float32(self): + seed = 90 + + with fluid.imperative.guard(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + # TODO: marsyang1993 Change seed to + ptb_model = PtbModel( + hidden_size=10, + vocab_size=1000, + num_layers=1, + num_steps=3, + init_scale=0.1) From 9eb2d7b3e1c976ad179561ca62be19f41a7584a7 Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Thu, 24 Jan 2019 04:28:41 +0000 Subject: [PATCH 18/46] refine code, test=develop --- .../operators/detection/multiclass_nms_op.cc | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 43d6382280..265bfc6c75 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -171,14 +171,17 @@ void SliceOneClass(const platform::DeviceContext& ctx, const T* items_data = items.data(); const int64_t num_item = items.dims()[0]; const int class_num = items.dims()[1]; - int item_size = 1; if (items.dims().size() == 3) { - item_size = items.dims()[2]; - } - for (int i = 0; i < num_item; ++i) { - std::memcpy(item_data + i * item_size, - items_data + i * class_num * item_size + class_id * item_size, - sizeof(T) * item_size); + int item_size = items.dims()[2]; + for (int i = 0; i < num_item; ++i) { + std::memcpy(item_data + i * item_size, + items_data + i * class_num * item_size + class_id * item_size, + sizeof(T) * item_size); + } + } else { + for (int i = 0; i < num_item; ++i) { + item_data[i] = items_data[i * class_num + class_id]; + } } } From 3ce2d295c0e196be109fedb230a6af0804b8338c Mon Sep 17 00:00:00 2001 From: minqiyang Date: Thu, 24 Jan 2019 13:55:26 +0800 Subject: [PATCH 19/46] Refine stop_gradient test=develop --- python/paddle/fluid/framework.py | 11 +++++++++++ python/paddle/fluid/imperative/nn.py | 13 ++++--------- python/paddle/fluid/optimizer.py | 2 +- .../tests/unittests/test_imperative_optimizer.py | 9 ++++----- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 3ddd73080b..17798e359c 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1307,6 +1307,17 @@ class Block(object): outputs=kwargs.get("outputs", None), attrs=kwargs.get("attrs", None)) self.ops.append(op) + + # set stop_gradient in static mode + if kwargs.get("stop_gradient", False): + outputs = kwargs.get("outputs", None) + if outputs is not None: + for k, v in six.iteritems(outputs): + if isinstance(v, Variable): + v.stop_gradient = True + elif isinstance(v, list) or isinstance(v, tuple): + for var in v: + var.stop_gradient = True self._trace_op(op, kwargs.get("stop_gradient", False)) return op diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 140c0ff037..fe5014f5e6 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -332,21 +332,16 @@ class BatchNorm(layers.Layer): shape=param_shape, dtype=self._dtype, default_initializer=Constant(1.0)) - - # TODO(minqiyang): change stop_gradient sign to trainable to align with static graph - # # setting stop_gradient=True to reduce computation - # if use_global_stats and self._helper.param_attr.learning_rate == 0.: - # self._scale.stop_gradient = True + if use_global_stats and self._helper.param_attr.learning_rate == 0.: + self._scale.stop_gradient = True self._bias = self._helper.create_parameter( attr=self._helper.bias_attr, shape=param_shape, dtype=self._dtype, is_bias=True) - # TODO(minqiyang): change stop_gradient sign to trainable to align with static graph - # # setting stop_gradient=True to reduce computation - # if use_global_stats and self._helper.bias_attr.learning_rate == 0.: - # self._bias.stop_gradient = True + if use_global_stats and self._helper.bias_attr.learning_rate == 0.: + self._bias.stop_gradient = True self._mean = self._helper.create_parameter( attr=ParamAttr( diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 14f4276e2f..e0e781a322 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -387,7 +387,7 @@ class Optimizer(object): params_grads = [] for param in parameters: - if param.stop_gradient: + if param.stop_gradient or not param.trainable: continue # create gradient variable grad_var = Variable( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index d0a5a88317..91637cac5b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -98,7 +98,7 @@ class MNIST(fluid.imperative.Layer): class TestImperativeMnist(unittest.TestCase): - def test_mnist_cpu_float32(self): + def test_mnist_float32(self): seed = 90 with fluid.imperative.guard(): @@ -196,11 +196,10 @@ class TestImperativeMnist(unittest.TestCase): static_param_value[static_param_name_list[i - 1]] = out[i] for key, value in six.iteritems(static_param_init_value): - self.assertTrue( - np.allclose(value.all(), dy_param_init_value[key].all())) - self.assertTrue(np.allclose(static_out.all(), dy_out.all())) + self.assertTrue(np.allclose(value, dy_param_init_value[key])) + self.assertTrue(np.allclose(static_out, dy_out)) for key, value in six.iteritems(static_param_value): - self.assertTrue(np.allclose(value.all(), dy_param_value[key].all())) + self.assertTrue(np.allclose(value, dy_param_value[key])) if __name__ == '__main__': From e3a8929cf8b1311fbccb46e6d46eb451c71dcea5 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 25 Jan 2019 03:31:45 +0000 Subject: [PATCH 20/46] little change --- paddle/fluid/inference/utils/CMakeLists.txt | 4 +- python/paddle/fluid/imperative/nn.py | 2 +- .../unittests/test_imperative_ptb_rnn.py | 166 ++++++++++++------ .../tests/unittests/test_imperative_split.py | 48 +++++ 4 files changed, 159 insertions(+), 61 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_imperative_split.py diff --git a/paddle/fluid/inference/utils/CMakeLists.txt b/paddle/fluid/inference/utils/CMakeLists.txt index c43eaf7f98..a7b239731b 100644 --- a/paddle/fluid/inference/utils/CMakeLists.txt +++ b/paddle/fluid/inference/utils/CMakeLists.txt @@ -1,4 +1,4 @@ cc_library(benchmark SRCS benchmark.cc DEPS enforce) cc_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark) -cc_binary(visualizer SRCS visualizer.cc DEPS analysis - paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes) +#cc_binary(visualizer SRCS visualizer.cc DEPS analysis +# paddle_pass_builder ir_pass_manager pass graph_viz_pass analysis_passes) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 381fc4ef15..0fe680b491 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -295,7 +295,7 @@ class EMBEDDING(layers.Layer): self._param_attr = param_attr self._dtype = dtype - self._remote_prefetch = self.is_sparse and (not self.is_distributed) + self._remote_prefetch = self._is_sparse and (not self._is_distributed) if self._remote_prefetch: assert self._is_sparse is True and self._is_distributed is False diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index ecd52c8b80..c64d5964e7 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -18,23 +18,28 @@ import unittest import paddle.fluid as fluid from paddle.fluid.imperative.nn import EMBEDDING import paddle.fluid.framework as framework -import paddle.fluid.optimizer as optimizer +from paddle.fluid.optimizer import SGDOptimizer +from paddle.fluid.imperative.base import to_variable +import numpy as np from paddle.fluid.backward import append_backward class SimpleLSTMRNN(fluid.imperative.Layer): - def __init__(self, hidden_size, num_layers=2, init_scale=0.1, dropout=None): + def __init__(self, + hidden_size, + num_steps, + num_layers=2, + init_scale=0.1, + dropout=None): + super(SimpleLSTMRNN, self).__init__() self._hidden_size = hidden_size self._num_layers = num_layers self._init_scale = init_scale self._dropout = dropout self.input = None + self.num_steps = num_steps - def _build_once(self, - input_embedding, - seq_len, - init_hidden=None, - init_cell=None): + def _build_once(self, input_embedding, init_hidden=None, init_cell=None): self.weight_1_arr = [] self.weight_2_arr = [] self.bias_arr = [] @@ -57,7 +62,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer): default_initializer=fluid.initializer.Constant(0.0)) self.bias_arr.append(bias_1) - pre_hidden = self.layers.slice( + pre_hidden = fluid.layers.slice( init_hidden, axes=[0], starts=[i], ends=[i + 1]) pre_cell = fluid.layers.slice( init_cell, axes=[0], starts=[i], ends=[i + 1]) @@ -65,22 +70,20 @@ class SimpleLSTMRNN(fluid.imperative.Layer): pre_hidden, shape=[-1, self._hidden_size]) pre_cell = fluid.layers.reshape( pre_cell, shape=[-1, self._hidden_size]) - fluid.hidden_array.append(pre_hidden) - fluid.cell_array.append(pre_cell) - - def forward(self, - input_embedding, - seq_len, - init_hidden=None, - init_cell=None): + self.hidden_array.append(pre_hidden) + self.cell_array.append(pre_cell) + + def forward(self, input_embedding, init_hidden=None, init_cell=None): res = [] - for index in range(seq_len): + for index in range(self.num_steps): self.input = fluid.layers.slice( input_embedding, axes=[1], starts=[index], ends=[index + 1]) self.input = fluid.layers.reshape( self.input, shape=[-1, self._hidden_size]) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] + print("pre_hidden shape is:{}".format(pre_hidden.shape)) + print("input shape is:{}".format(self.input.shape)) pre_cell = self.cell_array[k] weight_1 = self.weight_1_arr[k] bias = self.bias_arr[k] @@ -89,38 +92,41 @@ class SimpleLSTMRNN(fluid.imperative.Layer): gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - i, j, f, o = fluid.layers.split( - gate_input, num_or_sections=4, dim=-1) - - c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - i) * fluid.layers.tanh(j) - m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) - - self.hidden_array[k] = m - self.cell_array[k] = c - self.input = m - - if self.dropout is not None and self.dropout > 0.0: - self.input = fluid.layers.dropout( - self.input, - dropout_prob=self.dropout, - dropout_implementation='upscale_in_train') - - res.append( - fluid.layers.reshape( - input, shape=[1, -1, self._hidden_size])) - real_res = fluid.layers.concat(res, 0) - real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) - last_hidden = fluid.layers.concat(self.hidden_array, 1) - last_hidden = fluid.layers.reshape( - last_hidden, shape=[-1, self._num_layers, self._hidden_size]) - last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) - last_cell = fluid.layers.concat(self.cell_array, 1) - last_cell = fluid.layers.reshape( - last_cell, shape=[-1, self._num_layers, self._hidden_size]) - last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) - - return real_res, last_hidden, last_cell + print("gate_input shape is: {}".format(gate_input.shape)) + print("gate_input value is :{}".format(gate_input._numpy())) + print("gate_input desc is :{}".format(gate_input)) + # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1) + # # + # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( + # # i) * fluid.layers.tanh(j) + # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) + # # + # # self.hidden_array[k] = m + # # self.cell_array[k] = c + # # self.input = m + # # + # # if self.dropout is not None and self.dropout > 0.0: + # # self.input = fluid.layers.dropout( + # # self.input, + # # dropout_prob=self.dropout, + # # dropout_implementation='upscale_in_train') + # # + # # res.append( + # # fluid.layers.reshape( + # # input, shape=[1, -1, self._hidden_size])) + # # real_res = fluid.layers.concat(res, 0) + # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) + # # last_hidden = fluid.layers.concat(self.hidden_array, 1) + # # last_hidden = fluid.layers.reshape( + # # last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) + # # last_cell = fluid.layers.concat(self.cell_array, 1) + # # last_cell = fluid.layers.reshape( + # # last_cell, shape=[-1, self._num_layers, self._hidden_size]) + # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) + # # + # return real_res, last_hidden, last_cell + return [1], [2], [3] class PtbModel(fluid.imperative.Layer): @@ -137,8 +143,10 @@ class PtbModel(fluid.imperative.Layer): self.init_scale = init_scale self.num_layers = num_layers self.num_steps = num_steps + self.dropout = dropout self.simple_lstm_rnn = SimpleLSTMRNN( hidden_size, + num_steps, num_layers=num_layers, init_scale=init_scale, dropout=dropout) @@ -153,21 +161,23 @@ class PtbModel(fluid.imperative.Layer): def _build_once(self, input, label, init_hidden, init_cell): self.softmax_weight = fluid.layers.create_parameter( - [self._hidden_size, self._vocab_size], + [self.hidden_size, self.vocab_size], dtype="float32", name="softmax_weight", default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self.init_scale, high=self.init_scale)) self.softmax_bias = fluid.layers.create_parameter( - [self._vocab_size], + [self.vocab_size], dtype="float32", name='softmax_bias', default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) + low=-self.init_scale, high=self.init_scale)) def forward(self, input, label, init_hidden, init_cell): + init_h = fluid.layers.reshape( init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + init_c = fluid.layers.reshape( init_cell, shape=[self.num_layers, -1, self.hidden_size]) @@ -179,6 +189,7 @@ class PtbModel(fluid.imperative.Layer): x_emb, dropout_prob=self.drop_out, dropout_implementation='upscale_in_train') + print("init_c is {}".format(init_c)) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h, init_c) rnn_out = fluid.layers.reshape( @@ -202,14 +213,53 @@ class PtbModel(fluid.imperative.Layer): class TestImperativePtbRnn(unittest.TestCase): def test_mnist_cpu_float32(self): seed = 90 + hidden_size = 10 + vocab_size = 1000 + num_layers = 1 + num_steps = 3 + init_scale = 0.1 + batch_size = 4 with fluid.imperative.guard(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed # TODO: marsyang1993 Change seed to ptb_model = PtbModel( - hidden_size=10, - vocab_size=1000, - num_layers=1, - num_steps=3, - init_scale=0.1) + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale) + + sgd = SGDOptimizer(learning_rate=1e-3) + print("q") + for i in range(2): + x_data = np.arange(12).reshape(4, 3).astype('int64') + y_data = np.arange(1, 13).reshape(4, 3).astype('int64') + x_data = x_data.reshape((-1, num_steps, 1)) + y_data = y_data.reshape((-1, 1)) + init_hidden_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + x = to_variable(x_data) + y = to_variable(y_data) + init_hidden = to_variable(init_hidden_data) + init_cell = to_variable(init_cell_data) + dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, + init_cell) + dy_param_init = dict() + if i == 0: + for param in fluid.default_main_program().global_block( + ).all_parameters(): + dy_param_init[param.name] = param._numpy() + dy_loss._backward() + sgd.minimize(dy_loss) + dy_param_updated = dict() + for param in fluid.default_main_program().global_block( + ).all_parameters(): + dy_param_updated[param.name] = param._numpy() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py new file mode 100644 index 0000000000..696fb5f788 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_split.py @@ -0,0 +1,48 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import paddle.fluid as fluid +from paddle.fluid.imperative.nn import EMBEDDING +import paddle.fluid.framework as framework +from paddle.fluid.optimizer import SGDOptimizer +from paddle.fluid.imperative.base import to_variable +import numpy as np + + +class Split_test(fluid.imperative.Layer): + def __init__(self): + super(Split_test, self).__init__() + + def _build_once(self, input): + pass + + def forward(self, input): + out = fluid.layers.split(input, num_or_sections=4, dim=-1) + return out + + +class TestImperativePtbRnn(unittest.TestCase): + def test_spilt(self): + with fluid.imperative.guard(): + inp = to_variable(np.arange(160).reshape(4, 40).astype('float32')) + st = Split_test() + out = st(inp) + print(out) + + +if __name__ == '__main__': + unittest.main() From 3be8ffad2fa39679bdbe5864b846a517b50b0106 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 25 Jan 2019 05:16:10 +0000 Subject: [PATCH 21/46] test=develop, polish code and merge conflict --- paddle/fluid/framework/operator.cc | 14 +- paddle/fluid/framework/tensor_impl.h | 3 +- .../unittests/test_imperative_ptb_rnn.py | 265 ------------------ .../tests/unittests/test_imperative_split.py | 48 ---- 4 files changed, 10 insertions(+), 320 deletions(-) delete mode 100644 python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py delete mode 100644 python/paddle/fluid/tests/unittests/test_imperative_split.py diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index ee9f6a4805..ec5cd1c4c8 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1073,7 +1073,8 @@ Scope* OperatorWithKernel::PrepareData( proto::VarType::Type OperatorWithKernel::IndicateDataType( const ExecutionContext& ctx) const { - int data_type = -1; + proto::VarType::Type defaut_data_type = static_cast(-1); + proto::VarType::Type data_type = defaut_data_type; for (auto& input : this->inputs_) { const std::vector vars = ctx.MultiInputVar(input.first); for (size_t i = 0; i < vars.size(); ++i) { @@ -1090,18 +1091,19 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType( if (t != nullptr) { PADDLE_ENFORCE(t->IsInitialized(), "Input %s(%lu)is not initialized", input.first, i); - int tmp = static_cast(t->type()); + proto::VarType::Type tmp = t->type(); PADDLE_ENFORCE( - tmp == data_type || data_type == -1, + tmp == data_type || data_type == defaut_data_type, "DataType of Paddle Op %s must be the same. Get (%d) != (%d)", - Type(), data_type, tmp); + Type(), DataTypeToString(data_type), DataTypeToString(tmp)); data_type = tmp; } } } } - PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input"); - return static_cast(data_type); + PADDLE_ENFORCE(data_type != defaut_data_type, + "DataType should be indicated by input"); + return data_type; } OpKernelType OperatorWithKernel::GetExpectedKernelType( diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index ce3ad18b1f..ef5404e475 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -25,7 +25,8 @@ inline const T* Tensor::data() const { check_memory_size(); bool valid = std::is_same::value || type_ == DataTypeTrait::DataType; - PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %d", type_); + PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %d", + DataTypeToString(type_)); return reinterpret_cast( reinterpret_cast(holder_->ptr()) + offset_); diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py deleted file mode 100644 index c64d5964e7..0000000000 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import paddle.fluid as fluid -from paddle.fluid.imperative.nn import EMBEDDING -import paddle.fluid.framework as framework -from paddle.fluid.optimizer import SGDOptimizer -from paddle.fluid.imperative.base import to_variable -import numpy as np -from paddle.fluid.backward import append_backward - - -class SimpleLSTMRNN(fluid.imperative.Layer): - def __init__(self, - hidden_size, - num_steps, - num_layers=2, - init_scale=0.1, - dropout=None): - super(SimpleLSTMRNN, self).__init__() - self._hidden_size = hidden_size - self._num_layers = num_layers - self._init_scale = init_scale - self._dropout = dropout - self.input = None - self.num_steps = num_steps - - def _build_once(self, input_embedding, init_hidden=None, init_cell=None): - self.weight_1_arr = [] - self.weight_2_arr = [] - self.bias_arr = [] - self.hidden_array = [] - self.cell_array = [] - self.mask_array = [] - - for i in range(self._num_layers): - weight_1 = fluid.layers.create_parameter( - shape=[self._hidden_size * 2, self._hidden_size * 4], - dtype="float32", - name="fc_weight1_" + str(i), - default_initializer=fluid.initializer.UniformInitializer( - low=-self._init_scale, high=self._init_scale)) - self.weight_1_arr.append(weight_1) - bias_1 = fluid.layers.create_parameter( - [self._hidden_size * 4], - dtype="float32", - name="fc_bias1_" + str(i), - default_initializer=fluid.initializer.Constant(0.0)) - self.bias_arr.append(bias_1) - - pre_hidden = fluid.layers.slice( - init_hidden, axes=[0], starts=[i], ends=[i + 1]) - pre_cell = fluid.layers.slice( - init_cell, axes=[0], starts=[i], ends=[i + 1]) - pre_hidden = fluid.layers.reshape( - pre_hidden, shape=[-1, self._hidden_size]) - pre_cell = fluid.layers.reshape( - pre_cell, shape=[-1, self._hidden_size]) - self.hidden_array.append(pre_hidden) - self.cell_array.append(pre_cell) - - def forward(self, input_embedding, init_hidden=None, init_cell=None): - res = [] - for index in range(self.num_steps): - self.input = fluid.layers.slice( - input_embedding, axes=[1], starts=[index], ends=[index + 1]) - self.input = fluid.layers.reshape( - self.input, shape=[-1, self._hidden_size]) - for k in range(self._num_layers): - pre_hidden = self.hidden_array[k] - print("pre_hidden shape is:{}".format(pre_hidden.shape)) - print("input shape is:{}".format(self.input.shape)) - pre_cell = self.cell_array[k] - weight_1 = self.weight_1_arr[k] - bias = self.bias_arr[k] - - nn = fluid.layers.concat([self.input, pre_hidden], 1) - gate_input = fluid.layers.matmul(x=nn, y=weight_1) - - gate_input = fluid.layers.elementwise_add(gate_input, bias) - print("gate_input shape is: {}".format(gate_input.shape)) - print("gate_input value is :{}".format(gate_input._numpy())) - print("gate_input desc is :{}".format(gate_input)) - # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1) - # # - # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - # # i) * fluid.layers.tanh(j) - # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) - # # - # # self.hidden_array[k] = m - # # self.cell_array[k] = c - # # self.input = m - # # - # # if self.dropout is not None and self.dropout > 0.0: - # # self.input = fluid.layers.dropout( - # # self.input, - # # dropout_prob=self.dropout, - # # dropout_implementation='upscale_in_train') - # # - # # res.append( - # # fluid.layers.reshape( - # # input, shape=[1, -1, self._hidden_size])) - # # real_res = fluid.layers.concat(res, 0) - # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) - # # last_hidden = fluid.layers.concat(self.hidden_array, 1) - # # last_hidden = fluid.layers.reshape( - # # last_hidden, shape=[-1, self._num_layers, self._hidden_size]) - # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) - # # last_cell = fluid.layers.concat(self.cell_array, 1) - # # last_cell = fluid.layers.reshape( - # # last_cell, shape=[-1, self._num_layers, self._hidden_size]) - # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) - # # - # return real_res, last_hidden, last_cell - return [1], [2], [3] - - -class PtbModel(fluid.imperative.Layer): - def __init__(self, - hidden_size, - vocab_size, - num_layers=2, - num_steps=20, - init_scale=0.1, - dropout=None): - super(PtbModel, self).__init__() - self.hidden_size = hidden_size - self.vocab_size = vocab_size - self.init_scale = init_scale - self.num_layers = num_layers - self.num_steps = num_steps - self.dropout = dropout - self.simple_lstm_rnn = SimpleLSTMRNN( - hidden_size, - num_steps, - num_layers=num_layers, - init_scale=init_scale, - dropout=dropout) - self.embedding = EMBEDDING( - size=[vocab_size, hidden_size], - dtype='float32', - is_sparse=False, - param_attr=fluid.ParamAttr( - name='embedding_para', - initializer=fluid.initializer.UniformInitializer( - low=-init_scale, high=init_scale))) - - def _build_once(self, input, label, init_hidden, init_cell): - self.softmax_weight = fluid.layers.create_parameter( - [self.hidden_size, self.vocab_size], - dtype="float32", - name="softmax_weight", - default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) - self.softmax_bias = fluid.layers.create_parameter( - [self.vocab_size], - dtype="float32", - name='softmax_bias', - default_initializer=fluid.initializer.UniformInitializer( - low=-self.init_scale, high=self.init_scale)) - - def forward(self, input, label, init_hidden, init_cell): - - init_h = fluid.layers.reshape( - init_hidden, shape=[self.num_layers, -1, self.hidden_size]) - - init_c = fluid.layers.reshape( - init_cell, shape=[self.num_layers, -1, self.hidden_size]) - - x_emb = self.embedding(input) - x_emb = fluid.layers.reshape( - x_emb, shape=[-1, self.num_steps, self.hidden_size]) - if self.dropout is not None and self.dropout > 0.0: - x_emb = fluid.layers.dropout( - x_emb, - dropout_prob=self.drop_out, - dropout_implementation='upscale_in_train') - print("init_c is {}".format(init_c)) - rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h, - init_c) - rnn_out = fluid.layers.reshape( - rnn_out, shape=[-1, self.num_steps, self.hidden_size]) - projection = fluid.layers.reshape(rnn_out, self.softmax_weight) - projection = fluid.layers.elementwise_add(projection, self.softmax_bias) - projection = fluid.layers.reshape( - projection, shape=[-1, self.vocab_size]) - projection = fluid.layers.reshape( - projection, shape=[-1, self.vocab_size]) - loss = fluid.layers.softmax_with_cross_entropy( - logits=projection, label=label, soft_label=False) - loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) - loss = fluid.layers.reduce_mean(loss, dim=[0]) - loss = fluid.layers.reduce_sum(loss) - loss.permissions = True - - return loss, last_hidden, last_cell - - -class TestImperativePtbRnn(unittest.TestCase): - def test_mnist_cpu_float32(self): - seed = 90 - hidden_size = 10 - vocab_size = 1000 - num_layers = 1 - num_steps = 3 - init_scale = 0.1 - batch_size = 4 - - with fluid.imperative.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed - # TODO: marsyang1993 Change seed to - ptb_model = PtbModel( - hidden_size=hidden_size, - vocab_size=vocab_size, - num_layers=num_layers, - num_steps=num_steps, - init_scale=init_scale) - - sgd = SGDOptimizer(learning_rate=1e-3) - print("q") - for i in range(2): - x_data = np.arange(12).reshape(4, 3).astype('int64') - y_data = np.arange(1, 13).reshape(4, 3).astype('int64') - x_data = x_data.reshape((-1, num_steps, 1)) - y_data = y_data.reshape((-1, 1)) - init_hidden_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - init_cell_data = np.zeros( - (num_layers, batch_size, hidden_size), dtype='float32') - x = to_variable(x_data) - y = to_variable(y_data) - init_hidden = to_variable(init_hidden_data) - init_cell = to_variable(init_cell_data) - dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, - init_cell) - dy_param_init = dict() - if i == 0: - for param in fluid.default_main_program().global_block( - ).all_parameters(): - dy_param_init[param.name] = param._numpy() - dy_loss._backward() - sgd.minimize(dy_loss) - dy_param_updated = dict() - for param in fluid.default_main_program().global_block( - ).all_parameters(): - dy_param_updated[param.name] = param._numpy() - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py deleted file mode 100644 index 696fb5f788..0000000000 --- a/python/paddle/fluid/tests/unittests/test_imperative_split.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import paddle.fluid as fluid -from paddle.fluid.imperative.nn import EMBEDDING -import paddle.fluid.framework as framework -from paddle.fluid.optimizer import SGDOptimizer -from paddle.fluid.imperative.base import to_variable -import numpy as np - - -class Split_test(fluid.imperative.Layer): - def __init__(self): - super(Split_test, self).__init__() - - def _build_once(self, input): - pass - - def forward(self, input): - out = fluid.layers.split(input, num_or_sections=4, dim=-1) - return out - - -class TestImperativePtbRnn(unittest.TestCase): - def test_spilt(self): - with fluid.imperative.guard(): - inp = to_variable(np.arange(160).reshape(4, 40).astype('float32')) - st = Split_test() - out = st(inp) - print(out) - - -if __name__ == '__main__': - unittest.main() From 466a10dcddf22c5a88cdb5cb1c38bcd0c0cc7cac Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Fri, 25 Jan 2019 08:32:26 +0000 Subject: [PATCH 22/46] refine code, test=develop --- .../operators/detection/multiclass_nms_op.cc | 2 +- python/paddle/fluid/layers/detection.py | 12 ++++++++---- .../tests/unittests/test_multiclass_nms_op.py | 15 +++++++-------- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 265bfc6c75..f357e3ccf9 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -520,7 +520,7 @@ independently for each class. The outputs is a 2-D LoDTenosr, for each image, the offsets in first dimension of LoDTensor are called LoD, the number of offset is N + 1, where N is the batch size. If LoD[i + 1] - LoD[i] == 0, means there is no detected bbox for this image. If there is no detected boxes -for all images, all the elements in LoD are set to {0,1}, and the Out only +for all images, all the elements in LoD are set to {1}, and the Out only contains one value which is -1. )DOC"); } diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 4ee0cce62a..7cf575d253 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -263,8 +263,10 @@ def detection_output(loc, number is N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]` detected results, if it is 0, the i-th image has no detected results. If all images have not detected results, - all the elements in LoD are 0, and output tensor only contains one + LoD will be set to {1}, and output tensor only contains one value, which is -1. + (After version 1.3, when no boxes detected, the lod is changed + from {0} to {1}.) Examples: .. code-block:: python @@ -1967,8 +1969,8 @@ def multiclass_nms(bboxes, scores, score_threshold, nms_top_k, - nms_threshold, keep_top_k, + nms_threshold=0.3, normalized=True, nms_eta=1., background_label=0, @@ -2035,8 +2037,10 @@ def multiclass_nms(bboxes, Each row has 10 values: [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the total number of detections. If there is no detected boxes for all - images, lod will be set to {0, 1} and Out only contains one value - which is -1. + images, lod will be set to {1} and Out only contains one value + which is -1. + (After version 1.3, when no boxes detected, the lod is changed + from {0} to {1}) Examples: .. code-block:: python diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index 2a50e0bd85..8fc391a1ff 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -19,7 +19,7 @@ import copy from op_test import OpTest -def iou(box_a, box_b, normalized): +def iou(box_a, box_b, norm): """Apply intersection-over-union overlap between box_a and box_b """ xmin_a = min(box_a[0], box_a[2]) @@ -32,10 +32,10 @@ def iou(box_a, box_b, normalized): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a + (normalized == False)) * \ - (xmax_a - xmin_a + (normalized == False)) - area_b = (ymax_b - ymin_b + (normalized == False)) * \ - (xmax_b - xmin_b + (normalized == False)) + area_a = (ymax_a - ymin_a + (norm == False)) * (xmax_a - xmin_a + + (norm == False)) + area_b = (ymax_b - ymin_b + (norm == False)) * (xmax_b - xmin_b + + (norm == False)) if area_a <= 0 and area_b <= 0: return 0.0 @@ -44,8 +44,8 @@ def iou(box_a, box_b, normalized): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa + (normalized == False), 0.0) * \ - max(yb - ya + (normalized == False), 0.0) + inter_area = max(xb - xa + (norm == False), + 0.0) * max(yb - ya + (norm == False), 0.0) iou_ratio = inter_area / (area_a + area_b - inter_area) @@ -210,7 +210,6 @@ def batched_multiclass_nms(boxes, normalized, shared=True) if nmsed_num == 0: - # lod.append(1) continue lod.append(nmsed_num) From d9b93962b02b3819b4bba18500b914b68aee818b Mon Sep 17 00:00:00 2001 From: jerrywgz Date: Fri, 25 Jan 2019 08:36:05 +0000 Subject: [PATCH 23/46] test=develop --- paddle/fluid/API.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 6f50b69624..5145013f3a 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -324,7 +324,7 @@ paddle.fluid.layers.iou_similarity ArgSpec(args=['x', 'y', 'name'], varargs=None paddle.fluid.layers.box_coder ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name'], varargs=None, keywords=None, defaults=('encode_center_size', True, None)) paddle.fluid.layers.polygon_box_transform ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)) paddle.fluid.layers.yolov3_loss ArgSpec(args=['x', 'gtbox', 'gtlabel', 'anchors', 'class_num', 'ignore_thresh', 'loss_weight_xy', 'loss_weight_wh', 'loss_weight_conf_target', 'loss_weight_conf_notarget', 'loss_weight_class', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None)) -paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'nms_threshold', 'keep_top_k', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(True, 1.0, 0, None)) +paddle.fluid.layers.multiclass_nms ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)) paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)) paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk', 'slide_steps'], varargs=None, keywords=None, defaults=('ROC', 4095, 1, 1)) paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) From c42ef5bf0531dd28df1773de5e2b439643d5c590 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Fri, 25 Jan 2019 18:30:03 +0800 Subject: [PATCH 24/46] remove legacy WITH_DOC option test=develop --- CMakeLists.txt | 6 -- Dockerfile | 2 - cmake/FindSphinx.cmake | 147 --------------------------------- paddle/scripts/paddle_build.sh | 31 ------- 4 files changed, 186 deletions(-) delete mode 100644 cmake/FindSphinx.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 9ec632e206..e85fce5836 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,9 +276,3 @@ add_subdirectory(paddle) if(WITH_PYTHON) add_subdirectory(python) endif() - -if(WITH_DOC) - find_package(Sphinx REQUIRED) - find_python_module(recommonmark REQUIRED) - add_subdirectory(doc) -endif() diff --git a/Dockerfile b/Dockerfile index acfd091265..fe0721e9b9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,12 +11,10 @@ RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ub # ENV variables ARG WITH_GPU ARG WITH_AVX -ARG WITH_DOC ENV WOBOQ OFF ENV WITH_GPU=${WITH_GPU:-ON} ENV WITH_AVX=${WITH_AVX:-ON} -ENV WITH_DOC=${WITH_DOC:-OFF} ENV HOME /root # Add bash enhancements diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake deleted file mode 100644 index f74cd4ff8c..0000000000 --- a/cmake/FindSphinx.cmake +++ /dev/null @@ -1,147 +0,0 @@ -# - This module looks for Sphinx -# Find the Sphinx documentation generator -# -# This modules defines -# SPHINX_EXECUTABLE -# SPHINX_FOUND - -find_program(SPHINX_EXECUTABLE - NAMES sphinx-build - PATHS - /usr/bin - /usr/local/bin - /opt/local/bin - DOC "Sphinx documentation generator" -) - -if( NOT SPHINX_EXECUTABLE ) - set(_Python_VERSIONS - 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0 1.6 1.5 - ) - - foreach( _version ${_Python_VERSIONS} ) - set( _sphinx_NAMES sphinx-build-${_version} ) - - find_program( SPHINX_EXECUTABLE - NAMES ${_sphinx_NAMES} - PATHS - /usr/bin - /usr/local/bin - /opt/loca/bin - DOC "Sphinx documentation generator" - ) - endforeach() -endif() - -include(FindPackageHandleStandardArgs) - -find_package_handle_standard_args(Sphinx DEFAULT_MSG - SPHINX_EXECUTABLE -) - - -option( SPHINX_HTML_OUTPUT "Build a single HTML with the whole content." ON ) -option( SPHINX_DIRHTML_OUTPUT "Build HTML pages, but with a single directory per document." OFF ) -option( SPHINX_HTMLHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in htmlhelp." OFF ) -option( SPHINX_QTHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in qthelp." OFF ) -option( SPHINX_DEVHELP_OUTPUT "Build HTML pages with additional information for building a documentation collection in devhelp." OFF ) -option( SPHINX_EPUB_OUTPUT "Build HTML pages with additional information for building a documentation collection in epub." OFF ) -option( SPHINX_LATEX_OUTPUT "Build LaTeX sources that can be compiled to a PDF document using pdflatex." OFF ) -option( SPHINX_MAN_OUTPUT "Build manual pages in groff format for UNIX systems." OFF ) -option( SPHINX_TEXT_OUTPUT "Build plain text files." OFF ) - - -mark_as_advanced( - SPHINX_EXECUTABLE - SPHINX_HTML_OUTPUT - SPHINX_DIRHTML_OUTPUT - SPHINX_HTMLHELP_OUTPUT - SPHINX_QTHELP_OUTPUT - SPHINX_DEVHELP_OUTPUT - SPHINX_EPUB_OUTPUT - SPHINX_LATEX_OUTPUT - SPHINX_MAN_OUTPUT - SPHINX_TEXT_OUTPUT -) - -function( Sphinx_add_target target_name builder conf cache source destination ) - add_custom_target( ${target_name} ALL - COMMAND ${SPHINX_EXECUTABLE} -b ${builder} - -d ${cache} - -c ${conf} - ${source} - ${destination} - COMMENT "Generating sphinx documentation: ${builder}" - COMMAND cd ${destination} && ln -sf ./index_*.html index.html - ) - - set_property( - DIRECTORY APPEND PROPERTY - ADDITIONAL_MAKE_CLEAN_FILES - ${destination} - ) -endfunction() - -# Target dependencies can be optionally listed at the end. -function( Sphinx_add_targets target_base_name conf source base_destination ) - - set( _dependencies ) - - foreach( arg IN LISTS ARGN ) - set( _dependencies ${_dependencies} ${arg} ) - endforeach() - - if( ${SPHINX_HTML_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_html html ${conf} ${source} ${base_destination}/html ) - - add_dependencies( ${target_base_name}_html ${_dependencies} ) - endif() - - if( ${SPHINX_DIRHTML_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_dirhtml dirhtml ${conf} ${source} ${base_destination}/dirhtml ) - - add_dependencies( ${target_base_name}_dirhtml ${_dependencies} ) - endif() - - if( ${SPHINX_QTHELP_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_qthelp qthelp ${conf} ${source} ${base_destination}/qthelp ) - - add_dependencies( ${target_base_name}_qthelp ${_dependencies} ) - endif() - - if( ${SPHINX_DEVHELP_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_devhelp devhelp ${conf} ${source} ${base_destination}/devhelp ) - - add_dependencies( ${target_base_name}_devhelp ${_dependencies} ) - endif() - - if( ${SPHINX_EPUB_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_epub epub ${conf} ${source} ${base_destination}/epub ) - - add_dependencies( ${target_base_name}_epub ${_dependencies} ) - endif() - - if( ${SPHINX_LATEX_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_latex latex ${conf} ${source} ${base_destination}/latex ) - - add_dependencies( ${target_base_name}_latex ${_dependencies} ) - endif() - - if( ${SPHINX_MAN_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_man man ${conf} ${source} ${base_destination}/man ) - - add_dependencies( ${target_base_name}_man ${_dependencies} ) - endif() - - if( ${SPHINX_TEXT_OUTPUT} ) - Sphinx_add_target( ${target_base_name}_text text ${conf} ${source} ${base_destination}/text ) - - add_dependencies( ${target_base_name}_text ${_dependencies} ) - endif() - - if( ${BUILD_TESTING} ) - sphinx_add_target( ${target_base_name}_linkcheck linkcheck ${conf} ${source} ${base_destination}/linkcheck ) - - add_dependencies( ${target_base_name}_linkcheck ${_dependencies} ) - endif() -endfunction() diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index c2156a436e..1135caf4f8 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -173,7 +173,6 @@ function cmake_gen() { -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} ${PYTHON_FLAGS} -DWITH_DSO=ON - -DWITH_DOC=${WITH_DOC:-OFF} -DWITH_GPU=${WITH_GPU:-OFF} -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} -DWITH_DISTRIBUTE=${distibuted_flag} @@ -208,7 +207,6 @@ EOF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \ ${PYTHON_FLAGS} \ -DWITH_DSO=ON \ - -DWITH_DOC=${WITH_DOC:-OFF} \ -DWITH_GPU=${WITH_GPU:-OFF} \ -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} \ -DWITH_DISTRIBUTE=${distibuted_flag} \ @@ -528,31 +526,6 @@ function bind_test() { wait } - -function gen_docs() { - mkdir -p ${PADDLE_ROOT}/build - cd ${PADDLE_ROOT}/build - cat < Date: Fri, 25 Jan 2019 11:05:27 +0000 Subject: [PATCH 25/46] refine test_detection, test=develop --- python/paddle/fluid/tests/test_detection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 6645d9a254..8723d9842a 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -476,7 +476,7 @@ class TestMulticlassNMS(unittest.TestCase): bboxes = layers.data( name='bboxes', shape=[-1, 10, 4], dtype='float32') scores = layers.data(name='scores', shape=[-1, 10], dtype='float32') - output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 0.7, 200) + output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7) self.assertIsNotNone(output) From ba981604fdf6e50041453d47369d113e2d5a65e0 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 25 Jan 2019 13:05:49 +0000 Subject: [PATCH 26/46] fix split --- paddle/fluid/framework/operator.cc | 21 +- python/paddle/fluid/imperative/nn.py | 12 +- .../fluid/tests/unittests/test_imperative.py | 1 - .../unittests/test_imperative_ptb_rnn.py | 265 ++++++++++++++++++ .../tests/unittests/test_imperative_split.py | 45 +++ 5 files changed, 322 insertions(+), 22 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py create mode 100644 python/paddle/fluid/tests/unittests/test_imperative_split.py diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index ec5cd1c4c8..a8cc66b126 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -555,18 +555,17 @@ Tensor* ExecutionContext::LegacyOutput(const std::string& name) const { template <> std::vector ExecutionContext::MultiOutput( const std::string& name) const { - auto names = op().Outputs(name); + auto it = ctx_.outputs.find(name); + if (it == ctx_.outputs.end()) { + return {}; + } + const std::vector& vars = it->second; std::vector res; - res.reserve(names.size()); - std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) -> Tensor* { - auto var = scope_.FindVar(sub_name); - if (var == nullptr) return nullptr; - PADDLE_ENFORCE( - var->IsType(), - "%s should be LoDTensor, but the received type is %s", - sub_name, ToTypeName(var->Type())); - return var->GetMutable(); + res.reserve(vars.size()); + std::transform(vars.begin(), vars.end(), std::back_inserter(res), + [&](Variable* var) -> Tensor* { + return var == nullptr ? nullptr + : var->GetMutable(); }); return res; } diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 68fffdfa33..b5c049e927 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -22,13 +22,7 @@ from . import layers from ..framework import Variable, OpProtoHolder from ..param_attr import ParamAttr from ..initializer import Normal, Constant -__all__ = [ - 'Conv2D', - 'Pool2D', - 'FC', - 'BatchNorm', - 'EMBEDDING' -] +__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'EMBEDDING'] class Conv2D(layers.Layer): @@ -419,8 +413,6 @@ class BatchNorm(layers.Layer): # Currently, we don't support inplace in imperative mode return self._helper.append_activation(batch_norm_out) - outputs={'Out': [bias_out]}, - class EMBEDDING(layers.Layer): @@ -438,7 +430,7 @@ class EMBEDDING(layers.Layer): self._is_distributed = is_distributed self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( - size[0] + padding_idx) + size[0] + padding_idx) self._param_attr = param_attr self._dtype = dtype diff --git a/python/paddle/fluid/tests/unittests/test_imperative.py b/python/paddle/fluid/tests/unittests/test_imperative.py index fab60ae756..6cfac57f54 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative.py +++ b/python/paddle/fluid/tests/unittests/test_imperative.py @@ -338,7 +338,6 @@ class TestImperative(unittest.TestCase): dy_grad_i2h = simple_rnn._cell._i2h_w._gradient() with new_program_scope(): - print("im here") inp = fluid.layers.data( name="inp", shape=[1, 4, 3], append_batch_size=False) simple_rnn = SimpleRNN() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py new file mode 100644 index 0000000000..c64d5964e7 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -0,0 +1,265 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import paddle.fluid as fluid +from paddle.fluid.imperative.nn import EMBEDDING +import paddle.fluid.framework as framework +from paddle.fluid.optimizer import SGDOptimizer +from paddle.fluid.imperative.base import to_variable +import numpy as np +from paddle.fluid.backward import append_backward + + +class SimpleLSTMRNN(fluid.imperative.Layer): + def __init__(self, + hidden_size, + num_steps, + num_layers=2, + init_scale=0.1, + dropout=None): + super(SimpleLSTMRNN, self).__init__() + self._hidden_size = hidden_size + self._num_layers = num_layers + self._init_scale = init_scale + self._dropout = dropout + self.input = None + self.num_steps = num_steps + + def _build_once(self, input_embedding, init_hidden=None, init_cell=None): + self.weight_1_arr = [] + self.weight_2_arr = [] + self.bias_arr = [] + self.hidden_array = [] + self.cell_array = [] + self.mask_array = [] + + for i in range(self._num_layers): + weight_1 = fluid.layers.create_parameter( + shape=[self._hidden_size * 2, self._hidden_size * 4], + dtype="float32", + name="fc_weight1_" + str(i), + default_initializer=fluid.initializer.UniformInitializer( + low=-self._init_scale, high=self._init_scale)) + self.weight_1_arr.append(weight_1) + bias_1 = fluid.layers.create_parameter( + [self._hidden_size * 4], + dtype="float32", + name="fc_bias1_" + str(i), + default_initializer=fluid.initializer.Constant(0.0)) + self.bias_arr.append(bias_1) + + pre_hidden = fluid.layers.slice( + init_hidden, axes=[0], starts=[i], ends=[i + 1]) + pre_cell = fluid.layers.slice( + init_cell, axes=[0], starts=[i], ends=[i + 1]) + pre_hidden = fluid.layers.reshape( + pre_hidden, shape=[-1, self._hidden_size]) + pre_cell = fluid.layers.reshape( + pre_cell, shape=[-1, self._hidden_size]) + self.hidden_array.append(pre_hidden) + self.cell_array.append(pre_cell) + + def forward(self, input_embedding, init_hidden=None, init_cell=None): + res = [] + for index in range(self.num_steps): + self.input = fluid.layers.slice( + input_embedding, axes=[1], starts=[index], ends=[index + 1]) + self.input = fluid.layers.reshape( + self.input, shape=[-1, self._hidden_size]) + for k in range(self._num_layers): + pre_hidden = self.hidden_array[k] + print("pre_hidden shape is:{}".format(pre_hidden.shape)) + print("input shape is:{}".format(self.input.shape)) + pre_cell = self.cell_array[k] + weight_1 = self.weight_1_arr[k] + bias = self.bias_arr[k] + + nn = fluid.layers.concat([self.input, pre_hidden], 1) + gate_input = fluid.layers.matmul(x=nn, y=weight_1) + + gate_input = fluid.layers.elementwise_add(gate_input, bias) + print("gate_input shape is: {}".format(gate_input.shape)) + print("gate_input value is :{}".format(gate_input._numpy())) + print("gate_input desc is :{}".format(gate_input)) + # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1) + # # + # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( + # # i) * fluid.layers.tanh(j) + # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) + # # + # # self.hidden_array[k] = m + # # self.cell_array[k] = c + # # self.input = m + # # + # # if self.dropout is not None and self.dropout > 0.0: + # # self.input = fluid.layers.dropout( + # # self.input, + # # dropout_prob=self.dropout, + # # dropout_implementation='upscale_in_train') + # # + # # res.append( + # # fluid.layers.reshape( + # # input, shape=[1, -1, self._hidden_size])) + # # real_res = fluid.layers.concat(res, 0) + # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) + # # last_hidden = fluid.layers.concat(self.hidden_array, 1) + # # last_hidden = fluid.layers.reshape( + # # last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) + # # last_cell = fluid.layers.concat(self.cell_array, 1) + # # last_cell = fluid.layers.reshape( + # # last_cell, shape=[-1, self._num_layers, self._hidden_size]) + # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) + # # + # return real_res, last_hidden, last_cell + return [1], [2], [3] + + +class PtbModel(fluid.imperative.Layer): + def __init__(self, + hidden_size, + vocab_size, + num_layers=2, + num_steps=20, + init_scale=0.1, + dropout=None): + super(PtbModel, self).__init__() + self.hidden_size = hidden_size + self.vocab_size = vocab_size + self.init_scale = init_scale + self.num_layers = num_layers + self.num_steps = num_steps + self.dropout = dropout + self.simple_lstm_rnn = SimpleLSTMRNN( + hidden_size, + num_steps, + num_layers=num_layers, + init_scale=init_scale, + dropout=dropout) + self.embedding = EMBEDDING( + size=[vocab_size, hidden_size], + dtype='float32', + is_sparse=False, + param_attr=fluid.ParamAttr( + name='embedding_para', + initializer=fluid.initializer.UniformInitializer( + low=-init_scale, high=init_scale))) + + def _build_once(self, input, label, init_hidden, init_cell): + self.softmax_weight = fluid.layers.create_parameter( + [self.hidden_size, self.vocab_size], + dtype="float32", + name="softmax_weight", + default_initializer=fluid.initializer.UniformInitializer( + low=-self.init_scale, high=self.init_scale)) + self.softmax_bias = fluid.layers.create_parameter( + [self.vocab_size], + dtype="float32", + name='softmax_bias', + default_initializer=fluid.initializer.UniformInitializer( + low=-self.init_scale, high=self.init_scale)) + + def forward(self, input, label, init_hidden, init_cell): + + init_h = fluid.layers.reshape( + init_hidden, shape=[self.num_layers, -1, self.hidden_size]) + + init_c = fluid.layers.reshape( + init_cell, shape=[self.num_layers, -1, self.hidden_size]) + + x_emb = self.embedding(input) + x_emb = fluid.layers.reshape( + x_emb, shape=[-1, self.num_steps, self.hidden_size]) + if self.dropout is not None and self.dropout > 0.0: + x_emb = fluid.layers.dropout( + x_emb, + dropout_prob=self.drop_out, + dropout_implementation='upscale_in_train') + print("init_c is {}".format(init_c)) + rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h, + init_c) + rnn_out = fluid.layers.reshape( + rnn_out, shape=[-1, self.num_steps, self.hidden_size]) + projection = fluid.layers.reshape(rnn_out, self.softmax_weight) + projection = fluid.layers.elementwise_add(projection, self.softmax_bias) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size]) + projection = fluid.layers.reshape( + projection, shape=[-1, self.vocab_size]) + loss = fluid.layers.softmax_with_cross_entropy( + logits=projection, label=label, soft_label=False) + loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps]) + loss = fluid.layers.reduce_mean(loss, dim=[0]) + loss = fluid.layers.reduce_sum(loss) + loss.permissions = True + + return loss, last_hidden, last_cell + + +class TestImperativePtbRnn(unittest.TestCase): + def test_mnist_cpu_float32(self): + seed = 90 + hidden_size = 10 + vocab_size = 1000 + num_layers = 1 + num_steps = 3 + init_scale = 0.1 + batch_size = 4 + + with fluid.imperative.guard(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + # TODO: marsyang1993 Change seed to + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale) + + sgd = SGDOptimizer(learning_rate=1e-3) + print("q") + for i in range(2): + x_data = np.arange(12).reshape(4, 3).astype('int64') + y_data = np.arange(1, 13).reshape(4, 3).astype('int64') + x_data = x_data.reshape((-1, num_steps, 1)) + y_data = y_data.reshape((-1, 1)) + init_hidden_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + x = to_variable(x_data) + y = to_variable(y_data) + init_hidden = to_variable(init_hidden_data) + init_cell = to_variable(init_cell_data) + dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, + init_cell) + dy_param_init = dict() + if i == 0: + for param in fluid.default_main_program().global_block( + ).all_parameters(): + dy_param_init[param.name] = param._numpy() + dy_loss._backward() + sgd.minimize(dy_loss) + dy_param_updated = dict() + for param in fluid.default_main_program().global_block( + ).all_parameters(): + dy_param_updated[param.name] = param._numpy() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py new file mode 100644 index 0000000000..5dee51f390 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_split.py @@ -0,0 +1,45 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import paddle.fluid as fluid +from paddle.fluid.imperative.base import to_variable +import numpy as np + + +class Split_test(fluid.imperative.Layer): + def __init__(self): + super(Split_test, self).__init__() + + def _build_once(self, input): + pass + + def forward(self, input): + out = fluid.layers.split(input, num_or_sections=4, dim=-1) + return out + + +class TestImperativePtbRnn(unittest.TestCase): + def test_spilt(self): + with fluid.imperative.guard(): + inp = to_variable(np.arange(160).reshape(4, 40).astype('float32')) + st = Split_test() + out = st(inp) + print(out) + + +if __name__ == '__main__': + unittest.main() From 5639f49b16bcc03c758c7a6c1574c7371ef26dd6 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 25 Jan 2019 13:12:36 +0000 Subject: [PATCH 27/46] test=develop, fix/multi_output_support_imperative --- paddle/fluid/framework/operator.cc | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index ee9f6a4805..031e719139 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -555,18 +555,17 @@ Tensor* ExecutionContext::LegacyOutput(const std::string& name) const { template <> std::vector ExecutionContext::MultiOutput( const std::string& name) const { - auto names = op().Outputs(name); + auto it = ctx_.outputs.find(name); + if (it == ctx_.outputs.end()) { + return {}; + } + const std::vector& vars = it->second; std::vector res; - res.reserve(names.size()); - std::transform(names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) -> Tensor* { - auto var = scope_.FindVar(sub_name); - if (var == nullptr) return nullptr; - PADDLE_ENFORCE( - var->IsType(), - "%s should be LoDTensor, but the received type is %s", - sub_name, ToTypeName(var->Type())); - return var->GetMutable(); + res.reserve(vars.size()); + std::transform(vars.begin(), vars.end(), std::back_inserter(res), + [&](Variable* var) -> Tensor* { + return var == nullptr ? nullptr + : var->GetMutable(); }); return res; } From f364b722075f9be9cffd2afc02a1e4ed85ed5930 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Fri, 25 Jan 2019 15:07:48 +0000 Subject: [PATCH 28/46] test=develop, add ptb_rnn test in imperative --- .../unittests/test_imperative_ptb_rnn.py | 169 +++++++++++++----- .../tests/unittests/test_imperative_split.py | 1 - 2 files changed, 120 insertions(+), 50 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index c64d5964e7..1610d49d82 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -20,7 +20,9 @@ from paddle.fluid.imperative.nn import EMBEDDING import paddle.fluid.framework as framework from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.imperative.base import to_variable +from test_imperative_base import new_program_scope import numpy as np +import six from paddle.fluid.backward import append_backward @@ -36,8 +38,8 @@ class SimpleLSTMRNN(fluid.imperative.Layer): self._num_layers = num_layers self._init_scale = init_scale self._dropout = dropout - self.input = None - self.num_steps = num_steps + self._input = None + self._num_steps = num_steps def _build_once(self, input_embedding, init_hidden=None, init_cell=None): self.weight_1_arr = [] @@ -75,58 +77,49 @@ class SimpleLSTMRNN(fluid.imperative.Layer): def forward(self, input_embedding, init_hidden=None, init_cell=None): res = [] - for index in range(self.num_steps): - self.input = fluid.layers.slice( + for index in range(self._num_steps): + self._input = fluid.layers.slice( input_embedding, axes=[1], starts=[index], ends=[index + 1]) - self.input = fluid.layers.reshape( - self.input, shape=[-1, self._hidden_size]) + self._input = fluid.layers.reshape( + self._input, shape=[-1, self._hidden_size]) for k in range(self._num_layers): pre_hidden = self.hidden_array[k] - print("pre_hidden shape is:{}".format(pre_hidden.shape)) - print("input shape is:{}".format(self.input.shape)) pre_cell = self.cell_array[k] weight_1 = self.weight_1_arr[k] bias = self.bias_arr[k] - nn = fluid.layers.concat([self.input, pre_hidden], 1) + nn = fluid.layers.concat([self._input, pre_hidden], 1) gate_input = fluid.layers.matmul(x=nn, y=weight_1) gate_input = fluid.layers.elementwise_add(gate_input, bias) - print("gate_input shape is: {}".format(gate_input.shape)) - print("gate_input value is :{}".format(gate_input._numpy())) - print("gate_input desc is :{}".format(gate_input)) - # i, j, f, o = fluid.layers.split(gate_input, num_or_sections=4, dim=-1) - # # - # # c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( - # # i) * fluid.layers.tanh(j) - # # m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) - # # - # # self.hidden_array[k] = m - # # self.cell_array[k] = c - # # self.input = m - # # - # # if self.dropout is not None and self.dropout > 0.0: - # # self.input = fluid.layers.dropout( - # # self.input, - # # dropout_prob=self.dropout, - # # dropout_implementation='upscale_in_train') - # # - # # res.append( - # # fluid.layers.reshape( - # # input, shape=[1, -1, self._hidden_size])) - # # real_res = fluid.layers.concat(res, 0) - # # real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) - # # last_hidden = fluid.layers.concat(self.hidden_array, 1) - # # last_hidden = fluid.layers.reshape( - # # last_hidden, shape=[-1, self._num_layers, self._hidden_size]) - # # last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) - # # last_cell = fluid.layers.concat(self.cell_array, 1) - # # last_cell = fluid.layers.reshape( - # # last_cell, shape=[-1, self._num_layers, self._hidden_size]) - # # last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) - # # - # return real_res, last_hidden, last_cell - return [1], [2], [3] + i, j, f, o = fluid.layers.split( + gate_input, num_or_sections=4, dim=-1) + c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid( + i) * fluid.layers.tanh(j) + m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o) + self.hidden_array[k] = m + self.cell_array[k] = c + self._input = m + + if self._dropout is not None and self._dropout > 0.0: + self._input = fluid.layers.dropout( + self._input, + dropout_prob=self._dropout, + dropout_implementation='upscale_in_train') + res.append( + fluid.layers.reshape( + self._input, shape=[1, -1, self._hidden_size])) + real_res = fluid.layers.concat(res, 0) + real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2]) + last_hidden = fluid.layers.concat(self.hidden_array, 1) + last_hidden = fluid.layers.reshape( + last_hidden, shape=[-1, self._num_layers, self._hidden_size]) + last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2]) + last_cell = fluid.layers.concat(self.cell_array, 1) + last_cell = fluid.layers.reshape( + last_cell, shape=[-1, self._num_layers, self._hidden_size]) + last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2]) + return real_res, last_hidden, last_cell class PtbModel(fluid.imperative.Layer): @@ -189,12 +182,11 @@ class PtbModel(fluid.imperative.Layer): x_emb, dropout_prob=self.drop_out, dropout_implementation='upscale_in_train') - print("init_c is {}".format(init_c)) rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h, init_c) rnn_out = fluid.layers.reshape( rnn_out, shape=[-1, self.num_steps, self.hidden_size]) - projection = fluid.layers.reshape(rnn_out, self.softmax_weight) + projection = fluid.layers.matmul(rnn_out, self.softmax_weight) projection = fluid.layers.elementwise_add(projection, self.softmax_bias) projection = fluid.layers.reshape( projection, shape=[-1, self.vocab_size]) @@ -232,7 +224,8 @@ class TestImperativePtbRnn(unittest.TestCase): init_scale=init_scale) sgd = SGDOptimizer(learning_rate=1e-3) - print("q") + dy_param_updated = dict() + dy_param_init = dict() for i in range(2): x_data = np.arange(12).reshape(4, 3).astype('int64') y_data = np.arange(1, 13).reshape(4, 3).astype('int64') @@ -248,17 +241,95 @@ class TestImperativePtbRnn(unittest.TestCase): init_cell = to_variable(init_cell_data) dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, init_cell) - dy_param_init = dict() if i == 0: for param in fluid.default_main_program().global_block( ).all_parameters(): dy_param_init[param.name] = param._numpy() dy_loss._backward() sgd.minimize(dy_loss) - dy_param_updated = dict() for param in fluid.default_main_program().global_block( ).all_parameters(): dy_param_updated[param.name] = param._numpy() + # print("dy_loss is {}".format(dy_loss._numpy())) + # print("last_hidden is {}".format(last_hidden._numpy())) + # print("last_cell is {}".format(last_cell._numpy())) + + with new_program_scope(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + # TODO: marsyang1993 Change seed to + ptb_model = PtbModel( + hidden_size=hidden_size, + vocab_size=vocab_size, + num_layers=num_layers, + num_steps=num_steps, + init_scale=init_scale) + + exe = fluid.Executor(fluid.CPUPlace()) + sgd = SGDOptimizer(learning_rate=1e-3) + x = fluid.layers.data(name="x", shape=[-1, 3, 1], dtype='int64') + y = fluid.layers.data(name="y", shape=[-1, 1], dtype='float32') + init_hidden = fluid.layers.data( + name="init_hidden", shape=[1], dtype='float32') + init_cell = fluid.layers.data( + name="init_cell", shape=[1], dtype='float32') + + static_loss, static_last_hidden, static_last_cell = ptb_model( + x, y, init_hidden, init_cell) + sgd.minimize(static_loss) + static_param_updated = dict() + static_param_init = dict() + static_param_name_list = list() + for param in fluid.default_startup_program().global_block( + ).all_parameters(): + static_param_name_list.append(param.name) + + out = exe.run(framework.default_startup_program(), + fetch_list=static_param_name_list) + for i in range(len(static_param_name_list)): + static_param_init[static_param_name_list[i]] = out[i] + + for i in range(2): + x_data = np.arange(12).reshape(4, 3).astype('int64') + y_data = np.arange(1, 13).reshape(4, 3).astype('int64') + x_data = x_data.reshape((-1, num_steps, 1)) + y_data = y_data.reshape((-1, 1)) + init_hidden_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + init_cell_data = np.zeros( + (num_layers, batch_size, hidden_size), dtype='float32') + fetch_list = [static_loss, static_last_hidden, static_last_cell] + fetch_list.extend(static_param_name_list) + out = exe.run(fluid.default_main_program(), + feed={ + "x": x_data, + "y": y_data, + "init_hidden": init_hidden_data, + "init_cell": init_cell_data + }, + fetch_list=fetch_list) + static_loss_value = out[0] + static_last_cell_value = out[1] + static_last_hidden_value = out[2] + # print("static_loss is {}".format(out[0])) + # print("last_hidden is {}".format(out[1])) + # print("last_cell is {}".format(out[2])) + for i in range(3, len(out)): + static_param_updated[static_param_name_list[i - 3]] = out[i] + self.assertTrue( + np.allclose(static_loss_value.all(), dy_loss._numpy().all())) + self.assertTrue( + np.allclose(static_last_cell_value.all(), + last_cell._numpy().all())) + self.assertTrue( + np.allclose(static_last_hidden_value.all(), + last_hidden._numpy().all())) + for key, value in six.iteritems(static_param_init): + self.assertTrue( + np.allclose(value.all(), dy_param_init[key].all())) + for key, value in six.iteritems(static_param_updated): + self.assertTrue( + np.allclose(value.all(), dy_param_updated[key].all())) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py index 5dee51f390..fb2049760a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_split.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_split.py @@ -38,7 +38,6 @@ class TestImperativePtbRnn(unittest.TestCase): inp = to_variable(np.arange(160).reshape(4, 40).astype('float32')) st = Split_test() out = st(inp) - print(out) if __name__ == '__main__': From 8e3da976f4c34f086c7213739d4839cacabf3c98 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 28 Jan 2019 02:35:44 +0000 Subject: [PATCH 29/46] test=develop, polish code --- .../tests/unittests/test_imperative_ptb_rnn.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 1610d49d82..9c6ec331e6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -226,6 +226,9 @@ class TestImperativePtbRnn(unittest.TestCase): sgd = SGDOptimizer(learning_rate=1e-3) dy_param_updated = dict() dy_param_init = dict() + dy_loss = None + last_hidden = None + last_cell = None for i in range(2): x_data = np.arange(12).reshape(4, 3).astype('int64') y_data = np.arange(1, 13).reshape(4, 3).astype('int64') @@ -288,7 +291,9 @@ class TestImperativePtbRnn(unittest.TestCase): fetch_list=static_param_name_list) for i in range(len(static_param_name_list)): static_param_init[static_param_name_list[i]] = out[i] - + static_loss_value = None + static_last_cell_value = None + static_last_hidden_value = None for i in range(2): x_data = np.arange(12).reshape(4, 3).astype('int64') y_data = np.arange(1, 13).reshape(4, 3).astype('int64') @@ -311,11 +316,9 @@ class TestImperativePtbRnn(unittest.TestCase): static_loss_value = out[0] static_last_cell_value = out[1] static_last_hidden_value = out[2] - # print("static_loss is {}".format(out[0])) - # print("last_hidden is {}".format(out[1])) - # print("last_cell is {}".format(out[2])) - for i in range(3, len(out)): - static_param_updated[static_param_name_list[i - 3]] = out[i] + for k in range(3, len(out)): + static_param_updated[static_param_name_list[k - 3]] = out[k] + self.assertTrue( np.allclose(static_loss_value.all(), dy_loss._numpy().all())) self.assertTrue( From ba4f43fd620c1c4cc7160136723bfa3cae975bde Mon Sep 17 00:00:00 2001 From: sneaxiy Date: Mon, 28 Jan 2019 05:25:44 +0000 Subject: [PATCH 30/46] fix compile error in distributed mode test=develop --- .../distributed/proto_encoder_helper.h | 2 +- paddle/fluid/platform/enforce.h | 137 ++++++------------ paddle/fluid/platform/nccl_helper.h | 2 +- paddle/fluid/string/printf.h | 2 + 4 files changed, 45 insertions(+), 98 deletions(-) diff --git a/paddle/fluid/operators/distributed/proto_encoder_helper.h b/paddle/fluid/operators/distributed/proto_encoder_helper.h index 27ca1f4edc..e9f06f5432 100644 --- a/paddle/fluid/operators/distributed/proto_encoder_helper.h +++ b/paddle/fluid/operators/distributed/proto_encoder_helper.h @@ -85,7 +85,7 @@ class ProtoEncodeHelper { #define REPLACE_ENFORCE_GLOG 1 // Make sure callers didn't do operations that went over max_size promised if (paddle::platform::is_error(p_ <= limit_)) { - paddle::platform::throw_on_error(p_ <= limit_); + paddle::platform::throw_on_error(p_ <= limit_, ""); } #undef REPLACE_ENFORCE_GLOG } diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 15413785ba..142d38f060 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -71,9 +71,8 @@ struct EnforceNotMet : public std::exception { } } - template - EnforceNotMet(const char* f, int l, ARGS... args) { - Init(string::Sprintf(args...), f, l); + EnforceNotMet(const std::string& str, const char* f, int l) { + Init(str, f, l); } const char* what() const noexcept override { return err_str_.c_str(); } @@ -142,28 +141,23 @@ struct EOFException : public std::exception { inline bool is_error(bool stat) { return !stat; } -template -inline typename std::enable_if::type throw_on_error( - bool stat, const Args&... args) { +inline void throw_on_error(bool stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(string::Sprintf(args...)); + throw std::runtime_error(msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } #ifdef PADDLE_WITH_CUDA -inline bool is_error(cudaError_t e) { return UNLIKELY(e); } +inline bool is_error(cudaError_t e) { return e != cudaSuccess; } -template -inline typename std::enable_if::type throw_on_error( - cudaError_t e, const Args&... args) { +inline void throw_on_error(cudaError_t e, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw thrust::system_error(e, thrust::cuda_category(), - string::Sprintf(args...)); + throw thrust::system_error(e, thrust::cuda_category(), msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } @@ -171,14 +165,12 @@ inline bool is_error(curandStatus_t stat) { return stat != CURAND_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - curandStatus_t stat, const Args&... args) { +inline void throw_on_error(curandStatus_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(), - string::Sprintf(args...)); + msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << msg; #endif } @@ -186,14 +178,11 @@ inline bool is_error(cudnnStatus_t stat) { return stat != CUDNN_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - cudnnStatus_t stat, const Args&... args) { +inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) + - string::Sprintf(args...)); + throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) + msg); #else - LOG(FATAL) << string::Sprintf(args...); + LOG(FATAL) << platform::dynload::cudnnGetErrorString(stat) << msg; #endif } @@ -201,9 +190,7 @@ inline bool is_error(cublasStatus_t stat) { return stat != CUBLAS_STATUS_SUCCESS; } -template -inline typename std::enable_if::type throw_on_error( - cublasStatus_t stat, const Args&... args) { +inline void throw_on_error(cublasStatus_t stat, const std::string& msg) { std::string err; if (stat == CUBLAS_STATUS_NOT_INITIALIZED) { err = "CUBLAS: not initialized, "; @@ -225,87 +212,45 @@ inline typename std::enable_if::type throw_on_error( err = "CUBLAS: license error, "; } #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(err + string::Sprintf(args...)); + throw std::runtime_error(err + msg); #else - LOG(FATAL) << err << string::Sprintf(args...); + LOG(FATAL) << err << msg; #endif } #if !defined(__APPLE__) && !defined(_WIN32) -template -inline typename std::enable_if::type throw_on_error( - ncclResult_t stat, const Args&... args) { - if (stat == ncclSuccess) { - return; - } else { +inline bool is_error(ncclResult_t nccl_result) { + return nccl_result != ncclSuccess; +} + +inline void throw_on_error(ncclResult_t stat, const std::string& msg) { #ifndef REPLACE_ENFORCE_GLOG - throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) + - string::Sprintf(args...)); + throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) + msg); #else - LOG(FATAL) << platform::dynload::ncclGetErrorString(stat) - << string::Sprintf(args...); + LOG(FATAL) << platform::dynload::ncclGetErrorString(stat) << msg; #endif - } } #endif // __APPLE__ and windows #endif // PADDLE_WITH_CUDA -template -inline void throw_on_error(T e) { - throw_on_error(e, ""); -} - -#define PADDLE_THROW(...) \ - throw ::paddle::platform::EnforceNotMet(__FILE__, __LINE__, __VA_ARGS__) - -#define __PADDLE_THROW_ERROR_I(_, _9, _8, _7, _6, _5, _4, _3, _2, X_, ...) X_; - -#define __THROW_ON_ERROR_ONE_ARG(COND, ARG) \ - ::paddle::platform::throw_on_error(COND, ::paddle::string::Sprintf(ARG)); - -#ifdef _WIN32 -#define __PADDLE_THROW_ON_ERROR(COND, ...) \ - __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__) -#else // _WIN32 -#define __PADDLE_THROW_ON_ERROR(COND, ...) \ - __PADDLE_THROW_ERROR_I( \ - __VA_ARGS__, ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - ::paddle::platform::throw_on_error(COND, __VA_ARGS__), \ - __THROW_ON_ERROR_ONE_ARG(COND, __VA_ARGS__)) -#endif // _WIN32 - -#define __PADDLE_UNARY_COMPARE(COND, ...) \ - do { \ - auto __cond = COND; \ - if (UNLIKELY(::paddle::platform::is_error(__cond))) { \ - __PADDLE_THROW_ON_ERROR(__cond, __VA_ARGS__); \ - } \ +#define PADDLE_THROW(...) \ + throw ::paddle::platform::EnforceNotMet( \ + ::paddle::string::Sprintf(__VA_ARGS__), __FILE__, __LINE__) + +#define PADDLE_ENFORCE(COND, ...) \ + do { \ + auto __cond__ = (COND); \ + if (UNLIKELY(::paddle::platform::is_error(__cond__))) { \ + try { \ + ::paddle::platform::throw_on_error( \ + __cond__, ::paddle::string::Sprintf(__VA_ARGS__)); \ + } catch (...) { \ + throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ + __FILE__, __LINE__); \ + } \ + } \ } while (0) -#ifndef REPLACE_ENFORCE_GLOG -#define __PADDLE_ENFORCE_I(COND, ...) \ - do { \ - try { \ - __PADDLE_UNARY_COMPARE(COND, __VA_ARGS__); \ - } catch (...) { \ - throw ::paddle::platform::EnforceNotMet(std::current_exception(), \ - __FILE__, __LINE__); \ - } \ - } while (0) - -#else -#define __PADDLE_ENFORCE_I(COND, ...) __PADDLE_UNARY_COMPARE(COND, __VA_ARGS__); -#endif // REPLACE_ENFORCE_GLOG - -#define __PADDLE_ENFORCE(__args) __PADDLE_ENFORCE_I __args -#define PADDLE_ENFORCE(...) __PADDLE_ENFORCE((__VA_ARGS__)) - #define PADDLE_THROW_EOF() \ do { \ throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \ diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h index 8df8e32098..6ae21ee829 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/nccl_helper.h @@ -64,7 +64,7 @@ class NCCLGroupGuard { } inline ~NCCLGroupGuard() { - CHECK_EQ(dynload::ncclGroupEnd(), ncclSuccess); + PADDLE_ENFORCE(dynload::ncclGroupEnd()); NCCLMutex().unlock(); } }; diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 0b94b60018..16bb3771f2 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -84,6 +84,8 @@ void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { tinyformat::vformat(out, fmt, tinyformat::makeFormatList(args...)); } +inline std::string Sprintf() { return ""; } + template std::string Sprintf(const Args&... args) { std::ostringstream oss; From 3dfbef290b98d30ac7f1f94da07e07f52dc41374 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 28 Jan 2019 06:17:42 +0000 Subject: [PATCH 31/46] polish code and add comments for Embedding --- python/paddle/fluid/imperative/nn.py | 45 ++++++++++++++++--- .../unittests/test_imperative_ptb_rnn.py | 11 ++--- 2 files changed, 46 insertions(+), 10 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index b5c049e927..ea04475493 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -22,7 +22,7 @@ from . import layers from ..framework import Variable, OpProtoHolder from ..param_attr import ParamAttr from ..initializer import Normal, Constant -__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'EMBEDDING'] +__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding'] class Conv2D(layers.Layer): @@ -415,7 +415,44 @@ class BatchNorm(layers.Layer): return self._helper.append_activation(batch_norm_out) -class EMBEDDING(layers.Layer): +class Embedding(layers.Layer): + """ + **Embedding Layer** + + This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in + a lookup table. The result of this lookup is the embedding of each ID in the + :attr:`input`. + + All the input variables are passed in as local variables to the LayerHelper + constructor. + + Args: + size(tuple|list): The shape of the look up table parameter. It should + have two elements which indicate the size of the dictionary of + embeddings and the size of each embedding vector respectively. + is_sparse(bool): The flag indicating whether to use sparse update. + is_distributed(bool): Whether to run lookup table from remote parameter server. + padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup. + Otherwise the given :attr:`padding_idx` indicates padding the output + with zeros whenever lookup encounters it in :attr:`input`. If + :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is + :math:`size[0] + dim`. + param_attr(ParamAttr): Parameters for this layer + dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc + + Returns: + Variable: The tensor variable storing the embeddings of the \ + supplied inputs. + + Examples: + .. code-block:: python + + dict_size = len(dataset.ids) + input = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32') + embedding = fluid.imperative.Embedding(size=[dict_size, 16]) + fc = embedding(input) + """ + def __init__(self, size, is_sparse=False, @@ -424,7 +461,7 @@ class EMBEDDING(layers.Layer): param_attr=None, dtype='float32'): - super(EMBEDDING, self).__init__() + super(Embedding, self).__init__() self._size = size self._is_sparse = is_sparse self._is_distributed = is_distributed @@ -440,8 +477,6 @@ class EMBEDDING(layers.Layer): from ..layer_helper import LayerHelper self._helper = LayerHelper('embedding', param_attr=param_attr) - - def _build_once(self, input): self._w = self._helper.create_parameter( attr=self._param_attr, shape=self._size, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 9c6ec331e6..a3e3f96713 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import paddle.fluid as fluid -from paddle.fluid.imperative.nn import EMBEDDING +from paddle.fluid.imperative.nn import Embedding import paddle.fluid.framework as framework from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.imperative.base import to_variable @@ -143,7 +143,7 @@ class PtbModel(fluid.imperative.Layer): num_layers=num_layers, init_scale=init_scale, dropout=dropout) - self.embedding = EMBEDDING( + self.embedding = Embedding( size=[vocab_size, hidden_size], dtype='float32', is_sparse=False, @@ -151,8 +151,6 @@ class PtbModel(fluid.imperative.Layer): name='embedding_para', initializer=fluid.initializer.UniformInitializer( low=-init_scale, high=init_scale))) - - def _build_once(self, input, label, init_hidden, init_cell): self.softmax_weight = fluid.layers.create_parameter( [self.hidden_size, self.vocab_size], dtype="float32", @@ -166,6 +164,9 @@ class PtbModel(fluid.imperative.Layer): default_initializer=fluid.initializer.UniformInitializer( low=-self.init_scale, high=self.init_scale)) + def _build_once(self, input, label, init_hidden, init_cell): + pass + def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( @@ -203,7 +204,7 @@ class PtbModel(fluid.imperative.Layer): class TestImperativePtbRnn(unittest.TestCase): - def test_mnist_cpu_float32(self): + def test_ptb_rnn_cpu_float32(self): seed = 90 hidden_size = 10 vocab_size = 1000 From 79d62c5402a89276dfe9e3d798cf9fc0fc5cb9cc Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 28 Jan 2019 14:20:25 +0800 Subject: [PATCH 32/46] Fix mnist --- python/paddle/fluid/framework.py | 12 +---- python/paddle/fluid/imperative/layers.py | 23 ++++++++- .../fluid/tests/unittests/CMakeLists.txt | 3 ++ .../unittests/test_imperative_optimizer.py | 22 ++++---- .../tests/unittests/test_imperative_resnet.py | 51 ++++++++++--------- 5 files changed, 67 insertions(+), 44 deletions(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 17798e359c..4692f20c1b 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1308,16 +1308,8 @@ class Block(object): attrs=kwargs.get("attrs", None)) self.ops.append(op) - # set stop_gradient in static mode - if kwargs.get("stop_gradient", False): - outputs = kwargs.get("outputs", None) - if outputs is not None: - for k, v in six.iteritems(outputs): - if isinstance(v, Variable): - v.stop_gradient = True - elif isinstance(v, list) or isinstance(v, tuple): - for var in v: - var.stop_gradient = True + # TODO(minqiyang): add stop_gradient support in static mode too. + # currently, we only support stop_gradient in imperative mode. self._trace_op(op, kwargs.get("stop_gradient", False)) return op diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index f457f56203..57c45f764b 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -15,6 +15,7 @@ import contextlib import sys import numpy as np +import collections from paddle.fluid import core from paddle.fluid import framework @@ -31,11 +32,29 @@ class Layer(core.Layer): self._dtype = dtype def parameters(self): - return [] + params = [] + for key in self.__dict__.keys(): + value = self.__dict__[key] + if isinstance(value, framework.Parameter): + params.append(value) + elif isinstance(value, core.Layer): + params.extend(value.parameters()) + elif isinstance(value, collections.Container): + if len(value) == 0: + continue + if isinstance(value[0], framework.Parameter): + params.extend(value) + elif isinstance(value[0], core.Layer): + for v in value: + params.extend(v.parameters()) + + return params def clear_gradients(self): + print([p.name for p in self.parameters()]) for p in self.parameters(): - p._clear_gradient() + if p.name not in set(['batch_norm_0.w_2', 'batch_norm_0.w_1']): + p._clear_gradient() def _build_once(self, inputs): pass diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index c23dfa01e7..7e693c6a41 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -85,6 +85,7 @@ list(REMOVE_ITEM TEST_OPS test_image_classification_resnet) list(REMOVE_ITEM TEST_OPS test_bilinear_interp_op) list(REMOVE_ITEM TEST_OPS test_nearest_interp_op) list(REMOVE_ITEM TEST_OPS test_imperative_resnet) +list(REMOVE_ITEM TEST_OPS test_imperative_optimizer) foreach(TEST_OP ${TEST_OPS}) py_test_modules(${TEST_OP} MODULES ${TEST_OP}) endforeach(TEST_OP) @@ -94,6 +95,8 @@ py_test_modules(test_bilinear_interp_op MODULES test_bilinear_interp_op SERIAL) py_test_modules(test_nearest_interp_op MODULES test_nearest_interp_op SERIAL) py_test_modules(test_imperative_resnet MODULES test_imperative_resnet ENVS FLAGS_cudnn_deterministic=1) +py_test_modules(test_imperative_optimizer MODULES test_imperative_optimizer ENVS + FLAGS_cudnn_deterministic=1) if(WITH_DISTRIBUTE) py_test_modules(test_dist_train MODULES test_dist_train SERIAL) set_tests_properties(test_listen_and_serv_op PROPERTIES TIMEOUT 20) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 91637cac5b..08b155acc6 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -82,13 +82,14 @@ class MNIST(fluid.imperative.Layer): self._simple_img_conv_pool_2 = SimpleImgConvPool( 20, 50, 5, 2, 2, act="relu") - pool_2_shape = 50 * 8 * 8 + pool_2_shape = 50 * 4 * 4 SIZE = 10 scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5 self._fc = FC(10, param_attr=fluid.param_attr.ParamAttr( initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale))) + loc=0.0, scale=scale)), + act="softmax") def forward(self, inputs): x = self._simple_img_conv_pool_1(inputs) @@ -100,7 +101,7 @@ class MNIST(fluid.imperative.Layer): class TestImperativeMnist(unittest.TestCase): def test_mnist_float32(self): seed = 90 - + batch_num = 2 with fluid.imperative.guard(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed @@ -112,15 +113,15 @@ class TestImperativeMnist(unittest.TestCase): dy_param_init_value = {} for batch_id, data in enumerate(train_reader()): - if batch_id >= 2: + if batch_id >= batch_num: break - x_data = np.array( + dy_x_data = np.array( [x[0].reshape(1, 28, 28) for x in data]).astype('float32') y_data = np.array([x[1] for x in data]).astype('int64').reshape( 128, 1) - img = to_variable(x_data) + img = to_variable(dy_x_data) label = to_variable(y_data) label._stop_gradient = True @@ -136,6 +137,7 @@ class TestImperativeMnist(unittest.TestCase): avg_loss._backward() sgd.minimize(avg_loss) + mnist.clear_gradients() dy_param_value = {} for param in fluid.default_main_program().global_block( ).all_parameters(): @@ -175,10 +177,10 @@ class TestImperativeMnist(unittest.TestCase): static_param_init_value[static_param_name_list[i]] = out[i] for batch_id, data in enumerate(train_reader()): - if batch_id >= 2: + if batch_id >= batch_num: break - x_data = np.array( + static_x_data = np.array( [x[0].reshape(1, 28, 28) for x in data]).astype('float32') y_data = np.array([x[1] for x in data]).astype('int64').reshape( [128, 1]) @@ -186,7 +188,7 @@ class TestImperativeMnist(unittest.TestCase): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run(fluid.default_main_program(), - feed={"pixel": x_data, + feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list) @@ -197,7 +199,9 @@ class TestImperativeMnist(unittest.TestCase): for key, value in six.iteritems(static_param_init_value): self.assertTrue(np.allclose(value, dy_param_init_value[key])) + self.assertTrue(np.allclose(static_out, dy_out)) + for key, value in six.iteritems(static_param_value): self.assertTrue(np.allclose(value, dy_param_value[key])) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 87a72dd04e..dfaaae0de3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -168,22 +168,22 @@ class ResNet(fluid.imperative.Layer): self.pool2d_max = Pool2D( pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') - self.bottleneck_block_list = [] - num_channels = 64 - for block in range(len(depth)): - shortcut = False - for i in range(depth[block]): - bottleneck_block = BottleneckBlock( - num_channels=num_channels, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut) - num_channels = bottleneck_block._num_channels_out - self.bottleneck_block_list.append(bottleneck_block) - shortcut = True - - self.pool2d_avg = Pool2D( - pool_size=7, pool_type='avg', global_pooling=True) + # self.bottleneck_block_list = [] + # num_channels = 64 + # for block in range(len(depth)): + # shortcut = False + # for i in range(depth[block]): + # bottleneck_block = BottleneckBlock( + # num_channels=num_channels, + # num_filters=num_filters[block], + # stride=2 if i == 0 and block != 0 else 1, + # shortcut=shortcut) + # num_channels = bottleneck_block._num_channels_out + # self.bottleneck_block_list.append(bottleneck_block) + # shortcut = True + + # self.pool2d_avg = Pool2D( + # pool_size=7, pool_type='avg', global_pooling=True) import math stdv = 1.0 / math.sqrt(2048 * 1.0) @@ -196,9 +196,9 @@ class ResNet(fluid.imperative.Layer): def forward(self, inputs): y = self.conv(inputs) y = self.pool2d_max(y) - for bottleneck_block in self.bottleneck_block_list: - y = bottleneck_block(y) - y = self.pool2d_avg(y) + # for bottleneck_block in self.bottleneck_block_list: + # y = bottleneck_block(y) + # y = self.pool2d_avg(y) y = self.out(y) return y @@ -209,7 +209,7 @@ class TestImperativeResnet(unittest.TestCase): batch_size = train_parameters["batch_size"] batch_num = 1 - with fluid.imperative.guard(): + with fluid.imperative.guard(place=fluid.CPUPlace()): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed @@ -264,6 +264,7 @@ class TestImperativeResnet(unittest.TestCase): )] = np_array optimizer.minimize(avg_loss) + resnet.clear_gradients() dy_param_value = {} for param in fluid.default_main_program().global_block( @@ -274,8 +275,9 @@ class TestImperativeResnet(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - exe = fluid.Executor(fluid.CPUPlace( - ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor(fluid.CPUPlace()) + # exe = fluid.Executor(fluid.CPUPlace( + # ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) resnet = ResNet() optimizer = optimizer_setting(train_parameters) @@ -345,6 +347,7 @@ class TestImperativeResnet(unittest.TestCase): static_grad_value[static_grad_name_list[ i - grad_start_pos]] = out[i] + print(static_out, dy_out) self.assertTrue(np.allclose(static_out, dy_out)) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) @@ -355,7 +358,9 @@ class TestImperativeResnet(unittest.TestCase): self.assertEqual(len(dy_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - self.assertTrue(np.allclose(value, dy_grad_value[key])) + if not np.allclose(value, dy_grad_value[key]): + print(key) + #self.assertTrue(np.allclose(value, dy_grad_value[key])) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) From 0ea7c9c129c52fc006fef6b37a100cea81c70cb7 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 28 Jan 2019 06:20:40 +0000 Subject: [PATCH 33/46] remove test split op in imperative --- .../tests/unittests/test_imperative_split.py | 44 ------------------- 1 file changed, 44 deletions(-) delete mode 100644 python/paddle/fluid/tests/unittests/test_imperative_split.py diff --git a/python/paddle/fluid/tests/unittests/test_imperative_split.py b/python/paddle/fluid/tests/unittests/test_imperative_split.py deleted file mode 100644 index fb2049760a..0000000000 --- a/python/paddle/fluid/tests/unittests/test_imperative_split.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import unittest -import paddle.fluid as fluid -from paddle.fluid.imperative.base import to_variable -import numpy as np - - -class Split_test(fluid.imperative.Layer): - def __init__(self): - super(Split_test, self).__init__() - - def _build_once(self, input): - pass - - def forward(self, input): - out = fluid.layers.split(input, num_or_sections=4, dim=-1) - return out - - -class TestImperativePtbRnn(unittest.TestCase): - def test_spilt(self): - with fluid.imperative.guard(): - inp = to_variable(np.arange(160).reshape(4, 40).astype('float32')) - st = Split_test() - out = st(inp) - - -if __name__ == '__main__': - unittest.main() From fff67a9481ca9cdd7437297811c483b441263fa3 Mon Sep 17 00:00:00 2001 From: JiabinYang Date: Mon, 28 Jan 2019 06:54:45 +0000 Subject: [PATCH 34/46] test=develop, use parameters() to get parameters --- python/paddle/fluid/imperative/nn.py | 3 +++ .../unittests/test_imperative_ptb_rnn.py | 25 ++++++++++++++----- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index ea04475493..6c010314a2 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -483,6 +483,9 @@ class Embedding(layers.Layer): dtype=self._dtype, is_bias=False) + def parameters(self): + return [self._w] + def forward(self, input): out = self._helper.create_variable_for_type_inference(self._dtype) self._helper.append_op( diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index a3e3f96713..5877e91f92 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -75,6 +75,16 @@ class SimpleLSTMRNN(fluid.imperative.Layer): self.hidden_array.append(pre_hidden) self.cell_array.append(pre_cell) + def parameters(self): + parameters = list() + for param in self.weight_1_arr: + parameters.append(param) + for param in self.weight_2_arr: + parameters.append(param) + for bias in self.bias_arr: + parameters.append(bias) + return parameters + def forward(self, input_embedding, init_hidden=None, init_cell=None): res = [] for index in range(self._num_steps): @@ -167,6 +177,12 @@ class PtbModel(fluid.imperative.Layer): def _build_once(self, input, label, init_hidden, init_cell): pass + def parameters(self): + parameters = self.simple_lstm_rnn.parameters() + [ + self.softmax_weight, self.softmax_bias + ] + self.embedding.parameters() + return parameters + def forward(self, input, label, init_hidden, init_cell): init_h = fluid.layers.reshape( @@ -246,13 +262,11 @@ class TestImperativePtbRnn(unittest.TestCase): dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, init_cell) if i == 0: - for param in fluid.default_main_program().global_block( - ).all_parameters(): + for param in ptb_model.parameters(): dy_param_init[param.name] = param._numpy() dy_loss._backward() sgd.minimize(dy_loss) - for param in fluid.default_main_program().global_block( - ).all_parameters(): + for param in ptb_model.parameters(): dy_param_updated[param.name] = param._numpy() # print("dy_loss is {}".format(dy_loss._numpy())) # print("last_hidden is {}".format(last_hidden._numpy())) @@ -284,8 +298,7 @@ class TestImperativePtbRnn(unittest.TestCase): static_param_updated = dict() static_param_init = dict() static_param_name_list = list() - for param in fluid.default_startup_program().global_block( - ).all_parameters(): + for param in ptb_model.parameters(): static_param_name_list.append(param.name) out = exe.run(framework.default_startup_program(), From a6910f900e5683f70a9110d4b1a22f54e051c8e5 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Mon, 28 Jan 2019 15:26:22 +0800 Subject: [PATCH 35/46] Always create variables in analysis_predictor before OptimizeInferenceProgram. (#15533) Otherwise, some other persistable variable (like RAW type) will not be created --- .../fluid/inference/api/analysis_predictor.cc | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 3a5f21d475..66374cb7f0 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -123,6 +123,15 @@ bool AnalysisPredictor::PrepareProgram( if (!program) { if (!LoadProgramDesc()) return false; + // If not cloned, the parameters should be loaded. + // If config_.ir_optim() is True, parameters is loaded in + // OptimizeInferenceProgram(), but other persistable variables + // (like RAW type var) are not created in scope. + // If config_.ir_optim() is False, parameters is loaded in LoadParameters(), + // still need to create other persistable variables. + // So in both case, create persistable variables at first. + executor_->CreateVariables(*inference_program_, 0, true, sub_scope_); + // Optimize the program, and load parameters and modify them in the // scope_. // This will change the scope_ address. @@ -130,15 +139,6 @@ bool AnalysisPredictor::PrepareProgram( status_ir_optim_enabled_ = true; OptimizeInferenceProgram(); } else { - // If the parent_scope is passed, we assert that the persistable variables - // are already created, so just create the no persistable variables. - - // If not cloned, the parameters should be loaded - // OptimizeInferenceProgram. - // So in both cases, just the local variables are needed to load, not the - // parematers. - executor_->CreateVariables(*inference_program_, 0, true, sub_scope_); - // Load parameters LOG(INFO) << "load parameters "; LoadParameters(); @@ -376,7 +376,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() { } argument_.SetIrAnalysisPasses(passes); argument_.SetAnalysisPasses(config_.pass_builder()->AnalysisPasses()); - argument_.SetScopeNotOwned(const_cast(scope_.get())); + argument_.SetScopeNotOwned(scope_.get()); Analyzer().Run(&argument_); PADDLE_ENFORCE(argument_.scope_valid()); From 5c7768776c2a0b0a3b7c39e618897d17bb5bf882 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 28 Jan 2019 17:00:04 +0800 Subject: [PATCH 36/46] Fix batch_norm's stop_gradient bug test=develop --- paddle/fluid/imperative/layer.cc | 2 ++ paddle/fluid/imperative/layer.h | 9 +++++++-- paddle/fluid/imperative/tracer.cc | 6 ++++-- python/paddle/fluid/imperative/nn.py | 4 ++++ 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 8029129b9a..64d4d999d1 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -156,6 +156,8 @@ class Autograd { for (auto it : candidate->pre_ops_) { for (OpBase* pre_op : it.second) { if (!pre_op) continue; + VLOG(5) << "op dep " << candidate->op_desc_->Type() << " <---- " + << it.first << " <---- " << pre_op->op_desc_->Type(); if (visited.find(pre_op) == visited.end()) { visited.insert(pre_op); queue.push_back(pre_op); diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 633924aa41..0151a80816 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -28,6 +28,7 @@ #include "paddle/fluid/framework/var_desc.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/imperative/type_defs.h" @@ -148,8 +149,12 @@ class VarBase { } void ClearGradient() { - delete grads_; - grads_ = new VarBase(true); + VLOG(1) << "clear gradient of " << var_desc_->Name(); + auto grads_t = grads_->var_->GetMutable(); + operators::math::set_constant( + *(platform::DeviceContextPool::Instance().Get( + grads_->var_->Get().place())), + grads_t, 0.0); } framework::LoDTensor& GradValue(); diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index 5b87839f45..c8af936c33 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -83,11 +83,12 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, op->input_vars_ = inputs; for (auto it : op->input_vars_) { auto& invars = invars_map[it.first]; + invars.reserve(it.second.size()); for (VarBase* inp : it.second) { PADDLE_ENFORCE_NOT_NULL(inp->var_, "op %s input %s nullptr", op->op_desc_->Type(), inp->var_desc_->Name()); - invars.push_back(inp->var_); + invars.emplace_back(inp->var_); vars[inp->var_desc_->Name()] = inp; if (inp->PreOp()) { op->pre_ops_[it.first].push_back(inp->PreOp()); @@ -104,9 +105,10 @@ void Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, for (auto it : op->output_vars_) { auto& outvars = outvars_map[it.first]; const std::vector& outputs = it.second; + outvars.reserve(outputs.size()); for (size_t i = 0; i < outputs.size(); ++i) { VarBase* out = outputs[i]; - outvars.push_back(out->var_); + outvars.emplace_back(out->var_); vars[out->var_desc_->Name()] = out; framework::VarDesc* var_desc = block->FindVar(out->var_desc_->Name()); diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index fe5014f5e6..543f573890 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -334,6 +334,7 @@ class BatchNorm(layers.Layer): default_initializer=Constant(1.0)) if use_global_stats and self._helper.param_attr.learning_rate == 0.: self._scale.stop_gradient = True + self._scale._stop_gradient = True self._bias = self._helper.create_parameter( attr=self._helper.bias_attr, @@ -342,6 +343,7 @@ class BatchNorm(layers.Layer): is_bias=True) if use_global_stats and self._helper.bias_attr.learning_rate == 0.: self._bias.stop_gradient = True + self._bias._stop_gradient = True self._mean = self._helper.create_parameter( attr=ParamAttr( @@ -352,6 +354,7 @@ class BatchNorm(layers.Layer): shape=param_shape, dtype=self._dtype) self._mean.stop_gradient = True + self._mean._stop_gradient = True self._variance = self._helper.create_parameter( attr=ParamAttr( @@ -362,6 +365,7 @@ class BatchNorm(layers.Layer): shape=param_shape, dtype=self._dtype) self._variance.stop_gradient = True + self._variance._stop_gradient = True self._in_place = in_place self._momentum = momentum From edf742cfacd8e6f4b9e9c33d619f1d12aa9d8aa6 Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 28 Jan 2019 17:03:19 +0800 Subject: [PATCH 37/46] Polish code test=develop --- python/paddle/fluid/framework.py | 9 +++++++-- python/paddle/fluid/imperative/nn.py | 4 ---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 4692f20c1b..195245a12f 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -442,11 +442,16 @@ class Variable(object): @property def _stop_gradient(self): - return self._ivar.stop_gradient + if _in_imperative_mode(): + return self._ivar.stop_gradient + else: + return self.stop_gradient @_stop_gradient.setter def _stop_gradient(self, s): - self._ivar.stop_gradient = s + if _in_imperative_mode(): + self._ivar.stop_gradient = s + self.stop_gradient = s @property def persistable(self): diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 543f573890..dc90603c37 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -333,7 +333,6 @@ class BatchNorm(layers.Layer): dtype=self._dtype, default_initializer=Constant(1.0)) if use_global_stats and self._helper.param_attr.learning_rate == 0.: - self._scale.stop_gradient = True self._scale._stop_gradient = True self._bias = self._helper.create_parameter( @@ -342,7 +341,6 @@ class BatchNorm(layers.Layer): dtype=self._dtype, is_bias=True) if use_global_stats and self._helper.bias_attr.learning_rate == 0.: - self._bias.stop_gradient = True self._bias._stop_gradient = True self._mean = self._helper.create_parameter( @@ -353,7 +351,6 @@ class BatchNorm(layers.Layer): do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=self._dtype) - self._mean.stop_gradient = True self._mean._stop_gradient = True self._variance = self._helper.create_parameter( @@ -364,7 +361,6 @@ class BatchNorm(layers.Layer): do_model_average=do_model_average_for_mean_and_var), shape=param_shape, dtype=self._dtype) - self._variance.stop_gradient = True self._variance._stop_gradient = True self._in_place = in_place From 49a7fba8485c71d0da32a31bb56ef88035a7832f Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 28 Jan 2019 17:42:23 +0800 Subject: [PATCH 38/46] Polish code test=develop --- paddle/fluid/imperative/layer.h | 6 ++- python/paddle/fluid/imperative/layers.py | 3 +- .../tests/unittests/test_imperative_resnet.py | 50 +++++++++---------- 3 files changed, 28 insertions(+), 31 deletions(-) diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 1d109259f3..46107341a4 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -141,11 +141,13 @@ class VarBase { void RunBackward(); void TrackPreOp(OpBase* pre_op, const std::string& pre_op_out_name, - int pre_op_out_idx, bool stop_gradient) { + int pre_op_out_idx, bool pre_op_stop_gradient) { pre_op_ = pre_op; pre_op_out_name_ = pre_op_out_name; pre_op_out_idx_ = pre_op_out_idx; - stop_gradient_ = stop_gradient; + if (pre_op_stop_gradient) { + stop_gradient_ = pre_op_stop_gradient; + } } void ClearGradient() { diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index 57c45f764b..c338c65a76 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -51,9 +51,8 @@ class Layer(core.Layer): return params def clear_gradients(self): - print([p.name for p in self.parameters()]) for p in self.parameters(): - if p.name not in set(['batch_norm_0.w_2', 'batch_norm_0.w_1']): + if not p._stop_gradient: p._clear_gradient() def _build_once(self, inputs): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index dfaaae0de3..c27fd0b802 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -168,22 +168,22 @@ class ResNet(fluid.imperative.Layer): self.pool2d_max = Pool2D( pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') - # self.bottleneck_block_list = [] - # num_channels = 64 - # for block in range(len(depth)): - # shortcut = False - # for i in range(depth[block]): - # bottleneck_block = BottleneckBlock( - # num_channels=num_channels, - # num_filters=num_filters[block], - # stride=2 if i == 0 and block != 0 else 1, - # shortcut=shortcut) - # num_channels = bottleneck_block._num_channels_out - # self.bottleneck_block_list.append(bottleneck_block) - # shortcut = True - - # self.pool2d_avg = Pool2D( - # pool_size=7, pool_type='avg', global_pooling=True) + self.bottleneck_block_list = [] + num_channels = 64 + for block in range(len(depth)): + shortcut = False + for i in range(depth[block]): + bottleneck_block = BottleneckBlock( + num_channels=num_channels, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut) + num_channels = bottleneck_block._num_channels_out + self.bottleneck_block_list.append(bottleneck_block) + shortcut = True + + self.pool2d_avg = Pool2D( + pool_size=7, pool_type='avg', global_pooling=True) import math stdv = 1.0 / math.sqrt(2048 * 1.0) @@ -196,9 +196,9 @@ class ResNet(fluid.imperative.Layer): def forward(self, inputs): y = self.conv(inputs) y = self.pool2d_max(y) - # for bottleneck_block in self.bottleneck_block_list: - # y = bottleneck_block(y) - # y = self.pool2d_avg(y) + for bottleneck_block in self.bottleneck_block_list: + y = bottleneck_block(y) + y = self.pool2d_avg(y) y = self.out(y) return y @@ -209,7 +209,7 @@ class TestImperativeResnet(unittest.TestCase): batch_size = train_parameters["batch_size"] batch_num = 1 - with fluid.imperative.guard(place=fluid.CPUPlace()): + with fluid.imperative.guard(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed @@ -275,9 +275,8 @@ class TestImperativeResnet(unittest.TestCase): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - exe = fluid.Executor(fluid.CPUPlace()) - # exe = fluid.Executor(fluid.CPUPlace( - # ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + exe = fluid.Executor(fluid.CPUPlace( + ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) resnet = ResNet() optimizer = optimizer_setting(train_parameters) @@ -347,7 +346,6 @@ class TestImperativeResnet(unittest.TestCase): static_grad_value[static_grad_name_list[ i - grad_start_pos]] = out[i] - print(static_out, dy_out) self.assertTrue(np.allclose(static_out, dy_out)) self.assertEqual(len(dy_param_init_value), len(static_param_init_value)) @@ -358,9 +356,7 @@ class TestImperativeResnet(unittest.TestCase): self.assertEqual(len(dy_grad_value), len(static_grad_value)) for key, value in six.iteritems(static_grad_value): - if not np.allclose(value, dy_grad_value[key]): - print(key) - #self.assertTrue(np.allclose(value, dy_grad_value[key])) + self.assertTrue(np.allclose(value, dy_grad_value[key])) self.assertTrue(np.isfinite(value.all())) self.assertFalse(np.isnan(value.any())) From 07822fef2c692dd884abb7aa54b416a70409bb9c Mon Sep 17 00:00:00 2001 From: minqiyang Date: Mon, 28 Jan 2019 18:43:51 +0800 Subject: [PATCH 39/46] Clear all parameters' gradient test=develop --- paddle/fluid/imperative/layer.h | 12 +++++++----- python/paddle/fluid/imperative/layers.py | 3 +-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 46107341a4..78205486c5 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -152,11 +152,13 @@ class VarBase { void ClearGradient() { VLOG(1) << "clear gradient of " << var_desc_->Name(); - auto grads_t = grads_->var_->GetMutable(); - operators::math::set_constant( - *(platform::DeviceContextPool::Instance().Get( - grads_->var_->Get().place())), - grads_t, 0.0); + if (grads_ && grads_->var_ && grads_->var_->IsInitialized()) { + auto grads_t = grads_->var_->GetMutable(); + operators::math::set_constant( + *(platform::DeviceContextPool::Instance().Get( + grads_->var_->Get().place())), + grads_t, 0.0); + } } framework::LoDTensor& GradValue(); diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index c338c65a76..71ff95bdea 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -52,8 +52,7 @@ class Layer(core.Layer): def clear_gradients(self): for p in self.parameters(): - if not p._stop_gradient: - p._clear_gradient() + p._clear_gradient() def _build_once(self, inputs): pass From 81177258522c11340c8b91a1bbcd4de1479786df Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 25 Jan 2019 08:43:20 +0000 Subject: [PATCH 40/46] add jit kernel hsum, hmax and softmax refer code test=develop --- paddle/fluid/operators/jit/benchmark.cc | 101 ++++---- paddle/fluid/operators/jit/helper.cc | 3 + paddle/fluid/operators/jit/kernel_base.h | 15 ++ .../fluid/operators/jit/refer/CMakeLists.txt | 3 + paddle/fluid/operators/jit/refer/refer.cc | 5 + paddle/fluid/operators/jit/refer/refer.h | 39 +++ paddle/fluid/operators/jit/test.cc | 222 +++++++++++------- paddle/fluid/platform/dynload/mklml.h | 2 + 8 files changed, 269 insertions(+), 121 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 186c37c56e..383532d8d2 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -158,7 +158,7 @@ void BenchAllImpls(const typename KernelTuples::attr_type& attr, Args... args) { using Tensor = paddle::framework::Tensor; -template +template void BenchXYZNKernel() { for (int d : TestSizes()) { Tensor x, y, z; @@ -175,7 +175,7 @@ void BenchXYZNKernel() { } } -template +template void BenchAXYNKernel() { for (int d : TestSizes()) { const T a = static_cast(3); @@ -190,7 +190,17 @@ void BenchAXYNKernel() { } } -template +template +void BenchXRNKernel() { + for (int d : TestSizes()) { + Tensor x; + RandomVec(d, x.mutable_data({d}, PlaceType())); + T res; + BenchAllImpls, PlaceType>(d, x.data(), &res, d); + } +} + +template void BenchXYNKernel() { for (int d : TestSizes()) { Tensor x, y; @@ -203,7 +213,7 @@ void BenchXYNKernel() { } } -template +template void BenchLSTMKernel() { for (bool use_peephole : {true, false}) { for (int d : TestSizes()) { @@ -240,7 +250,7 @@ void BenchLSTMKernel() { } } -template +template void BenchGRUKernel() { for (int d : TestSizes()) { const jit::gru_attr_t attr(d, jit::kVSigmoid, jit::kVTanh); @@ -262,7 +272,7 @@ void BenchGRUKernel() { } } -template +template void BenchSeqPoolKernel() { std::vector pool_types = { jit::SeqPoolType::kSum, jit::SeqPoolType::kAvg, jit::SeqPoolType::kSqrt}; @@ -284,7 +294,7 @@ void BenchSeqPoolKernel() { } } -template +template void BenchMatMulKernel() { for (int m : {1, 2, 3, 4}) { for (int n : TestSizes()) { @@ -305,57 +315,64 @@ void BenchMatMulKernel() { } } +template +void BenchSoftmaxKernel() { + for (int bs : {1, 2, 10}) { + for (int n : TestSizes()) { + Tensor x, y; + x.Resize({bs, n}); + y.Resize({bs, n}); + RandomVec(bs * n, x.mutable_data(PlaceType()), -2.f, 2.f); + const T* x_data = x.data(); + T* y_data = y.mutable_data(PlaceType()); + BenchAllImpls, PlaceType>(n, x_data, y_data, n, + bs); + } + } +} + using T = float; -using PlaceType = paddle::platform::CPUPlace; +using CPUPlace = paddle::platform::CPUPlace; // xyzn -BENCH_FP32_CPU(kVMul) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVAdd) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVAddRelu) { BenchXYZNKernel(); } - -BENCH_FP32_CPU(kVSub) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVMul) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVAdd) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVAddRelu) { BenchXYZNKernel(); } +BENCH_FP32_CPU(kVSub) { BenchXYZNKernel(); } // axyn -BENCH_FP32_CPU(kVScal) { BenchAXYNKernel(); } +BENCH_FP32_CPU(kVScal) { BenchAXYNKernel(); } +BENCH_FP32_CPU(kVAddBias) { BenchAXYNKernel(); } -BENCH_FP32_CPU(kVAddBias) { BenchAXYNKernel(); } +// xrn +BENCH_FP32_CPU(kHSum) { BenchXRNKernel(); } +BENCH_FP32_CPU(kHMax) { BenchXRNKernel(); } // xyn -BENCH_FP32_CPU(kVRelu) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVIdentity) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVSquare) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVExp) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVSigmoid) { BenchXYNKernel(); } - -BENCH_FP32_CPU(kVTanh) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVRelu) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVIdentity) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVSquare) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVExp) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVSigmoid) { BenchXYNKernel(); } +BENCH_FP32_CPU(kVTanh) { BenchXYNKernel(); } // lstm and peephole -BENCH_FP32_CPU(kLSTMCtHt) { BenchLSTMKernel(); } - -BENCH_FP32_CPU(kLSTMC1H1) { BenchLSTMKernel(); } +BENCH_FP32_CPU(kLSTMCtHt) { BenchLSTMKernel(); } +BENCH_FP32_CPU(kLSTMC1H1) { BenchLSTMKernel(); } // gru functions -BENCH_FP32_CPU(kGRUH1) { BenchGRUKernel(); } - -BENCH_FP32_CPU(kGRUHtPart1) { - BenchGRUKernel(); -} - -BENCH_FP32_CPU(kGRUHtPart2) { - BenchGRUKernel(); -} +BENCH_FP32_CPU(kGRUH1) { BenchGRUKernel(); } +BENCH_FP32_CPU(kGRUHtPart1) { BenchGRUKernel(); } +BENCH_FP32_CPU(kGRUHtPart2) { BenchGRUKernel(); } // seq pool function -BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } +BENCH_FP32_CPU(kSeqPool) { BenchSeqPoolKernel(); } // matmul -BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } +BENCH_FP32_CPU(kMatMul) { BenchMatMulKernel(); } + +// softmax +BENCH_FP32_CPU(kSoftmax) { BenchSoftmaxKernel(); } // Benchmark all jit kernels including jitcode, mkl and refer. // To use this tool, run command: ./benchmark [options...] diff --git a/paddle/fluid/operators/jit/helper.cc b/paddle/fluid/operators/jit/helper.cc index 5dbe22a81b..4dac2f2460 100644 --- a/paddle/fluid/operators/jit/helper.cc +++ b/paddle/fluid/operators/jit/helper.cc @@ -49,6 +49,9 @@ const char* to_string(KernelType kt) { ONE_CASE(kNCHW16CMulNC); ONE_CASE(kSeqPool); ONE_CASE(kMatMul); + ONE_CASE(kHMax); + ONE_CASE(kHSum); + ONE_CASE(kSoftmax); default: PADDLE_THROW("Not support type: %d, or forget to add it.", kt); return "NOT JITKernel"; diff --git a/paddle/fluid/operators/jit/kernel_base.h b/paddle/fluid/operators/jit/kernel_base.h index adb101bd5c..42a58580f7 100644 --- a/paddle/fluid/operators/jit/kernel_base.h +++ b/paddle/fluid/operators/jit/kernel_base.h @@ -20,6 +20,7 @@ namespace paddle { namespace operators { namespace jit { +// TODO(TJ): reorder by alphabet typedef enum { kNone = 0, kVMul = 1, @@ -44,6 +45,9 @@ typedef enum { kNCHW16CMulNC, kSeqPool, kMatMul, + kHSum, // horizontal max + kHMax, // horizontal sum + kSoftmax, } KernelType; typedef enum { @@ -70,6 +74,10 @@ struct XYNTuples { typedef void (*func_type)(const T*, T*, int); }; +// x, return and int +template +struct XRNTuples : public XYNTuples {}; + typedef struct { void* gates; // gates: x_ch, x_ih, x_fh, x_oh const void* ct_1; @@ -159,6 +167,13 @@ struct LayerNormTuples { const float, int); }; +template +struct SoftmaxTuples { + typedef T data_type; + typedef int attr_type; + typedef void (*func_type)(const T*, T*, int, int); +}; + // nChw16c = nChw16c .* NC template struct NCHW16CMulNCTuples { diff --git a/paddle/fluid/operators/jit/refer/CMakeLists.txt b/paddle/fluid/operators/jit/refer/CMakeLists.txt index 4b9bc5e8d4..9f2935828c 100644 --- a/paddle/fluid/operators/jit/refer/CMakeLists.txt +++ b/paddle/fluid/operators/jit/refer/CMakeLists.txt @@ -29,3 +29,6 @@ USE_JITKERNEL_REFER(kNCHW16CMulNC) USE_JITKERNEL_REFER(kSeqPool) USE_JITKERNEL_REFER(kMatMul) USE_JITKERNEL_REFER(kVSquare) +USE_JITKERNEL_REFER(kHSum) +USE_JITKERNEL_REFER(kHMax) +USE_JITKERNEL_REFER(kSoftmax) diff --git a/paddle/fluid/operators/jit/refer/refer.cc b/paddle/fluid/operators/jit/refer/refer.cc index 3512ad7fe7..b8adb40ec7 100644 --- a/paddle/fluid/operators/jit/refer/refer.cc +++ b/paddle/fluid/operators/jit/refer/refer.cc @@ -52,4 +52,9 @@ REGISTER_REFER_KERNEL(kSeqPool, SeqPool); REGISTER_REFER_KERNEL(kMatMul, MatMul); +REGISTER_REFER_KERNEL(kHMax, HMax); +REGISTER_REFER_KERNEL(kHSum, HSum); + +REGISTER_REFER_KERNEL(kSoftmax, Softmax); + #undef REGISTER_REFER_KERNEL diff --git a/paddle/fluid/operators/jit/refer/refer.h b/paddle/fluid/operators/jit/refer/refer.h index 97d0293585..5a074db7e0 100644 --- a/paddle/fluid/operators/jit/refer/refer.h +++ b/paddle/fluid/operators/jit/refer/refer.h @@ -378,6 +378,40 @@ void MatMul(const T* A, const T* B, T* C, int M, int N, int K) { } } +template +void HMax(const T* x, T* res, int n) { + res[0] = x[0]; + for (int i = 1; i < n; ++i) { + res[0] = res[0] < x[i] ? x[i] : res[0]; + } +} + +template +void HSum(const T* x, T* res, int n) { + res[0] = x[0]; + for (int i = 1; i < n; ++i) { + res[0] += x[i]; + } +} + +// y = e^(x - max(x)) +// y = y / sum(y) +template +void Softmax(const T* x, T* y, int n, int bs = 1) { + for (int i = 0; i < bs; ++i) { + T scalar; + HMax(x, &scalar, n); + scalar = static_cast(0) - scalar; + VAddBias(&scalar, x, y, n); // x - max + VExp(y, y, n); + HSum(y, &scalar, n); + scalar = static_cast(1) / scalar; + VScal(&scalar, y, y, n); + x += n; + y += n; + } +} + #define DECLARE_REFER_KERNEL(name, tuples) \ template \ class name##Kernel : public ReferKernel> { \ @@ -421,6 +455,11 @@ DECLARE_REFER_KERNEL(SeqPool, SeqPoolTuples); DECLARE_REFER_KERNEL(MatMul, MatMulTuples); +DECLARE_REFER_KERNEL(HMax, XRNTuples); +DECLARE_REFER_KERNEL(HSum, XRNTuples); + +DECLARE_REFER_KERNEL(Softmax, SoftmaxTuples); + #undef DECLARE_REFER_KERNEL } // namespace refer diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 68a79b6314..2578b282ab 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -61,6 +61,7 @@ std::vector TestSizes() { } namespace jit = paddle::operators::jit; +using CPUPlace = paddle::platform::CPUPlace; template struct TestFuncWithRefer { @@ -121,6 +122,40 @@ struct TestFuncWithRefer, T, std::vector, } }; +template +struct TestFuncWithRefer, std::vector, std::vector, + int, int> { + void operator()(const typename jit::SoftmaxTuples::func_type tgt, + const std::vector& x, const std::vector& yref, int n, + int bs) { + EXPECT_TRUE(tgt != nullptr); + EXPECT_EQ(yref.size(), x.size()); + EXPECT_EQ(x.size(), static_cast(n * bs)); + const T* x_data = x.data(); + const T* yref_data = yref.data(); + std::vector ytgt(n * bs); + T* ytgt_data = ytgt.data(); + // test normal + tgt(x_data, ytgt_data, n, bs); + ExpectEQ(ytgt_data, yref_data, n * bs); + // test inplace x + std::copy(x.begin(), x.end(), ytgt.begin()); + tgt(ytgt_data, ytgt_data, n, bs); + ExpectEQ(ytgt_data, yref_data, n * bs); + } +}; + +template +struct TestFuncWithRefer, std::vector, T> { + void operator()(const typename jit::XRNTuples::func_type tgt, + const std::vector& x, const T ref_res) { + EXPECT_TRUE(tgt != nullptr); + T tgt_res; + tgt(x.data(), &tgt_res, x.size()); + ExpectEQ(&tgt_res, &ref_res, 1); + } +}; + template struct TestFuncWithRefer, std::vector, std::vector> { void operator()(const typename jit::XYNTuples::func_type tgt, @@ -172,7 +207,7 @@ struct TestFuncWithRefer, std::vector, std::vector, T* ht_data = ht.data(); T* checked_data = checked.data(); - paddle::operators::jit::lstm_t step; + jit::lstm_t step; step.gates = x_data; step.ct_1 = ct_1_data; step.ct = ct_data; @@ -208,7 +243,7 @@ struct TestFuncWithRefer, std::vector, std::vector, const T* ht_ref_data = ht_ref.data(); T* x_data = x.data(); T* ht_data = ht.data(); - paddle::operators::jit::gru_t step; + jit::gru_t step; step.gates = x_data; step.ht_1 = ht_1_data; step.ht = ht_data; @@ -255,8 +290,8 @@ struct TestFuncWithRefer, std::vector, std::vector, } }; -template +template void TestAllImpls(const typename KernelTuples::attr_type& attr, Args... args) { TestFuncWithRefer test; // test jitcode @@ -286,9 +321,8 @@ void TestAllImpls(const typename KernelTuples::attr_type& attr, Args... args) { test(tgt, args...); } -template +template void TestXYZNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -320,9 +354,8 @@ void TestXYZNKernel() { } } -template +template void TestAXYNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -347,9 +380,23 @@ void TestAXYNKernel() { } } -template +template +void TestXRNKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + for (int d : TestSizes()) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector x(d); + RandomVec(d, x.data()); + T ref_res; + ref(x.data(), &ref_res, d); + TestAllImpls, PlaceType, std::vector, T>(d, x, + ref_res); + } +} + +template void TestXYNKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); for (int d : TestSizes()) { auto ref = jit::GetRefer>(); @@ -373,9 +420,8 @@ void TestXYNKernel() { } } -template +template void TestLSTMKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector all_acts = {"sigmoid", "tanh", "relu", "identity"}; for (int d : TestSizes()) { @@ -424,9 +470,8 @@ void TestLSTMKernel() { } } -template +template void TestGRUKernel() { - namespace jit = paddle::operators::jit; VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector all_acts = {"sigmoid", "tanh", "relu", "identity"}; for (int d : TestSizes()) { @@ -459,7 +504,7 @@ void TestGRUKernel() { } } -template +template void TestSeqPoolKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); std::vector pool_types = { @@ -484,7 +529,7 @@ void TestSeqPoolKernel() { } } -template +template void TestMatMulKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); auto last_acc = acc; @@ -510,7 +555,32 @@ void TestMatMulKernel() { acc = last_acc; } -template +template +void TestSoftmaxKernel() { + VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + for (int bs : {1, 2, 10}) { + for (int n : TestSizes()) { + auto ref = jit::GetRefer>(); + EXPECT_TRUE(ref != nullptr); + std::vector x(bs * n), y(bs * n); + RandomVec(bs * n, x.data(), -2.f, 2.f); + const T* x_data = x.data(); + T* y_data = y.data(); + + std::vector xinp(x.size()); // inplace test + std::copy(x.begin(), x.end(), xinp.begin()); + ref(x_data, y_data, n, bs); + T* xinp_data = xinp.data(); + ref(xinp_data, xinp_data, n, bs); + ExpectEQ(xinp_data, y_data, n * bs); + + TestAllImpls, PlaceType, std::vector, + std::vector>(n, x, y, n, bs); + } + } +} + +template void TestNCHW16CMulNCKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); const int n = 3, c = 16 * 4, h = 10, w = 10; @@ -565,129 +635,123 @@ void TestNCHW16CMulNCKernel() { // XYZNTuple TEST(JITKernel, kVMul) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVAdd) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVAddRelu) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } TEST(JITKernel, kVSub) { - namespace jit = paddle::operators::jit; - TestXYZNKernel(); - TestXYZNKernel(); + TestXYZNKernel(); + TestXYZNKernel(); } // AXYNTuples TEST(JITKernel, kVScal) { - namespace jit = paddle::operators::jit; - TestAXYNKernel(); - TestAXYNKernel(); + TestAXYNKernel(); + TestAXYNKernel(); } TEST(JITKernel, kVAddBias) { - namespace jit = paddle::operators::jit; - TestAXYNKernel(); - TestAXYNKernel(); + TestAXYNKernel(); + TestAXYNKernel(); +} + +// XRNTuples +TEST(JITKernel, kHMax) { + TestXRNKernel(); + TestXRNKernel(); +} + +TEST(JITKernel, kHSum) { + TestXRNKernel(); + TestXRNKernel(); } // XYNTuples TEST(JITKernel, kVRelu) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVIdentity) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVSquare) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVExp) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVSigmoid) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } TEST(JITKernel, kVTanh) { - namespace jit = paddle::operators::jit; - TestXYNKernel(); - TestXYNKernel(); + TestXYNKernel(); + TestXYNKernel(); } // LSTM TEST(JITKernel, kLSTMCtHt) { - namespace jit = paddle::operators::jit; - TestLSTMKernel(); - TestLSTMKernel(); + TestLSTMKernel(); + TestLSTMKernel(); } TEST(JITKernel, kLSTMC1H1) { - namespace jit = paddle::operators::jit; - TestLSTMKernel(); - TestLSTMKernel(); + TestLSTMKernel(); + TestLSTMKernel(); } // GRU TEST(JITKernel, kGRUH1) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kGRUHtPart1) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kGRUHtPart2) { - namespace jit = paddle::operators::jit; - TestGRUKernel(); - TestGRUKernel(); + TestGRUKernel(); + TestGRUKernel(); } TEST(JITKernel, kSeqPool) { - namespace jit = paddle::operators::jit; - TestSeqPoolKernel(); - TestSeqPoolKernel(); + TestSeqPoolKernel(); + TestSeqPoolKernel(); } TEST(JITKernel, kMatMul) { - namespace jit = paddle::operators::jit; - TestMatMulKernel(); - TestMatMulKernel(); + TestMatMulKernel(); + TestMatMulKernel(); +} + +TEST(JITKernel, kSoftmax) { + TestSoftmaxKernel(); + TestSoftmaxKernel(); } TEST(JITKernel, kNCHW16CMulNC) { - namespace jit = paddle::operators::jit; - TestNCHW16CMulNCKernel(); - TestNCHW16CMulNCKernel(); + TestNCHW16CMulNCKernel(); + TestNCHW16CMulNCKernel(); } // TODO(yihua/TJ): add crf decoding and layer norm unit tests diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h index d0619293ac..a260cda491 100644 --- a/paddle/fluid/platform/dynload/mklml.h +++ b/paddle/fluid/platform/dynload/mklml.h @@ -70,6 +70,8 @@ extern void* mklml_dso_handle; __macro(cblas_ddot); \ __macro(cblas_sasum); \ __macro(cblas_dasum); \ + __macro(cblas_isamax); \ + __macro(cblas_idamax); \ __macro(cblas_sscal); \ __macro(cblas_dscal); \ __macro(vsAdd); \ From 50945685f2b8f896acec25efb966a0b865ca6ea8 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 28 Jan 2019 09:04:12 +0000 Subject: [PATCH 41/46] add hmax, hsum jitcode test=develop --- paddle/fluid/operators/jit/gen/CMakeLists.txt | 2 + paddle/fluid/operators/jit/gen/hopv.cc | 103 ++++++++++++++++++ paddle/fluid/operators/jit/gen/hopv.h | 90 +++++++++++++++ paddle/fluid/operators/jit/gen/jitcode.h | 1 + paddle/fluid/operators/jit/test.cc | 5 +- 5 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/operators/jit/gen/hopv.cc create mode 100644 paddle/fluid/operators/jit/gen/hopv.h diff --git a/paddle/fluid/operators/jit/gen/CMakeLists.txt b/paddle/fluid/operators/jit/gen/CMakeLists.txt index 40310c2d2b..2ea8f927e1 100644 --- a/paddle/fluid/operators/jit/gen/CMakeLists.txt +++ b/paddle/fluid/operators/jit/gen/CMakeLists.txt @@ -28,3 +28,5 @@ USE_JITKERNEL_GEN(kGRUHtPart1) USE_JITKERNEL_GEN(kGRUHtPart2) USE_JITKERNEL_GEN(kNCHW16CMulNC) USE_JITKERNEL_GEN(kSeqPool) +USE_JITKERNEL_GEN(kHMax) +USE_JITKERNEL_GEN(kHSum) diff --git a/paddle/fluid/operators/jit/gen/hopv.cc b/paddle/fluid/operators/jit/gen/hopv.cc new file mode 100644 index 0000000000..e788401719 --- /dev/null +++ b/paddle/fluid/operators/jit/gen/hopv.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#include "paddle/fluid/operators/jit/gen/hopv.h" +#include "paddle/fluid/operators/jit/registry.h" +#include "paddle/fluid/platform/cpu_info.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +void HOPVJitCode::genCode() { + const int num_blocks = num_ / YMM_FLOAT_BLOCK; + int offset = 0; + + if (num_blocks > 0) { + // load one firstly + vmovups(ymm_tmp, ptr[param_src]); + offset += sizeof(float) * YMM_FLOAT_BLOCK; + for (int i = 1; i < num_blocks; ++i) { + vmovups(ymm_src, ptr[param_src + offset]); + process(ymm_tmp, ymm_src, ymm_tmp); + offset += sizeof(float) * YMM_FLOAT_BLOCK; + } + vextractf128(xmm_dst, ymm_tmp, 1); + process(xmm_dst, xmm_dst, xmm_tmp); + } else { + if (type_ == operand_type::MAX) { + vbroadcastss(ymm_dst, ptr[param_src]); + } else if (type_ == operand_type::ADD) { + vxorps(ymm_dst, ymm_dst, ymm_dst); + } + } + + int rest = num_ % YMM_FLOAT_BLOCK; + if (rest >= 4) { + vmovups(xmm_src, ptr[param_src + offset]); + offset += sizeof(float) * 4; + rest -= 4; + process(xmm_dst, xmm_dst, xmm_src); + } + + vpermilps(xmm_tmp, xmm_dst, 16 + 8 + 3); + process(xmm_dst, xmm_dst, xmm_tmp); + + if (rest >= 2) { + vmovq(xmm_src, ptr[param_src + offset]); + offset += sizeof(float) * 2; + rest -= 2; + process(xmm_dst, xmm_dst, xmm_src); + } + + vpermilps(xmm_tmp, xmm_dst, 1); + process(xmm_dst, xmm_dst, xmm_tmp); + + if (rest >= 1) { + vmovss(xmm_src, ptr[param_src + offset]); + process(xmm_dst, xmm_dst, xmm_src); + } + vmovss(ptr[param_dst], xmm_dst); + ret(); +} + +#define DECLARE_HOP_CREATOR(name) \ + class name##Creator : public JitCodeCreator { \ + public: \ + bool UseMe(const int& attr) const override { \ + return platform::MayIUse(platform::avx); \ + } \ + size_t CodeSize(const int& d) const override { \ + return 96 + d / YMM_FLOAT_BLOCK * 4 * 8; \ + } \ + std::unique_ptr CreateJitCode(const int& attr) const override { \ + return make_unique(attr, CodeSize(attr)); \ + } \ + } + +DECLARE_HOP_CREATOR(HMax); +DECLARE_HOP_CREATOR(HSum); + +#undef DECLARE_HOP_CREATOR + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle + +namespace gen = paddle::operators::jit::gen; + +REGISTER_JITKERNEL_GEN(kHMax, gen::HMaxCreator); +REGISTER_JITKERNEL_GEN(kHSum, gen::HSumCreator); diff --git a/paddle/fluid/operators/jit/gen/hopv.h b/paddle/fluid/operators/jit/gen/hopv.h new file mode 100644 index 0000000000..d3bc94b63d --- /dev/null +++ b/paddle/fluid/operators/jit/gen/hopv.h @@ -0,0 +1,90 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#pragma once + +#include +#include "glog/logging.h" +#include "paddle/fluid/operators/jit/gen/jitcode.h" + +namespace paddle { +namespace operators { +namespace jit { +namespace gen { + +// horizontal operand vector +class HOPVJitCode : public JitCode { + public: + explicit HOPVJitCode(int d, operand_type type, size_t code_size = 256 * 1024, + void* code_ptr = nullptr) + : JitCode(code_size, code_ptr), num_(d), type_(type) { + if (!(type_ == operand_type::MAX || type_ == operand_type::ADD)) { + LOG(FATAL) << "Do not support this operand type: " << type_; + } + this->genCode(); + } + + virtual const char* name() const { + std::string base = "VXXJitCode"; + if (type_ == operand_type::MAX) { + base += "_MAX"; + } else { + base += "_SUM"; + } + return base.c_str(); + } + void genCode() override; + + protected: + template + void process(JMM& dst, JMM& src1, JMM& src2) { // NOLINT + if (type_ == operand_type::MAX) { + vmaxps(dst, src1, src2); + } else if (type_ == operand_type::ADD) { + vaddps(dst, src1, src2); + } + } + + private: + int num_; + operand_type type_; + reg64_t param_src{abi_param1}; + reg64_t param_dst{abi_param2}; + reg64_t param_attr{abi_param3}; + + ymm_t ymm_tmp = ymm_t(0); + ymm_t ymm_src = ymm_t(1); + ymm_t ymm_dst = ymm_t(2); + + xmm_t xmm_tmp = xmm_t(0); + xmm_t xmm_src = xmm_t(1); + xmm_t xmm_dst = xmm_t(2); +}; + +#define DECLARE_HOP_JITCODE(name, op_type) \ + class name##JitCode : public HOPVJitCode { \ + public: \ + explicit name##JitCode(int d, size_t code_size, void* code_ptr = nullptr) \ + : HOPVJitCode(d, op_type, code_size, code_ptr) {} \ + }; + +DECLARE_HOP_JITCODE(HMax, operand_type::MAX); +DECLARE_HOP_JITCODE(HSum, operand_type::ADD); + +#undef DECLARE_HOP_JITCODE + +} // namespace gen +} // namespace jit +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/jit/gen/jitcode.h b/paddle/fluid/operators/jit/gen/jitcode.h index f63d40ad5a..c388109604 100644 --- a/paddle/fluid/operators/jit/gen/jitcode.h +++ b/paddle/fluid/operators/jit/gen/jitcode.h @@ -47,6 +47,7 @@ using Label = Xbyak::Label; typedef enum { MUL = 0, + MAX, ADD, SUB, RELU, diff --git a/paddle/fluid/operators/jit/test.cc b/paddle/fluid/operators/jit/test.cc index 2578b282ab..cc46155289 100644 --- a/paddle/fluid/operators/jit/test.cc +++ b/paddle/fluid/operators/jit/test.cc @@ -383,16 +383,19 @@ void TestAXYNKernel() { template void TestXRNKernel() { VLOG(10) << "===== Test JITKernel " << jit::to_string(KT); + auto last_acc = acc; + acc = 1e-4; for (int d : TestSizes()) { auto ref = jit::GetRefer>(); EXPECT_TRUE(ref != nullptr); std::vector x(d); - RandomVec(d, x.data()); + RandomVec(d, x.data(), -2.f, 2.f); T ref_res; ref(x.data(), &ref_res, d); TestAllImpls, PlaceType, std::vector, T>(d, x, ref_res); } + acc = last_acc; } template From 7383eefd2db74a593563ea35bc5aeb831e557a32 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 28 Jan 2019 13:30:06 +0000 Subject: [PATCH 42/46] add softmax mix and mkl code test=develop --- .../operators/jit/more/mix/CMakeLists.txt | 1 + paddle/fluid/operators/jit/more/mix/mix.cc | 24 +++++++++++++++++ paddle/fluid/operators/jit/more/mix/mix.h | 4 +++ .../operators/jit/more/mkl/CMakeLists.txt | 1 + paddle/fluid/operators/jit/more/mkl/mkl.cc | 17 ++++++++++++ paddle/fluid/operators/jit/more/mkl/mkl.h | 27 +++++++++++++++++++ 6 files changed, 74 insertions(+) diff --git a/paddle/fluid/operators/jit/more/mix/CMakeLists.txt b/paddle/fluid/operators/jit/more/mix/CMakeLists.txt index e05f204b1e..dd039d2915 100644 --- a/paddle/fluid/operators/jit/more/mix/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mix/CMakeLists.txt @@ -12,3 +12,4 @@ USE_JITKERNEL_MORE(kLSTMC1H1, mix) USE_JITKERNEL_MORE(kGRUH1, mix) USE_JITKERNEL_MORE(kGRUHtPart1, mix) USE_JITKERNEL_MORE(kGRUHtPart2, mix) +USE_JITKERNEL_MORE(kSoftmax, mix) diff --git a/paddle/fluid/operators/jit/more/mix/mix.cc b/paddle/fluid/operators/jit/more/mix/mix.cc index df0a85256b..2a75eb23cd 100644 --- a/paddle/fluid/operators/jit/more/mix/mix.cc +++ b/paddle/fluid/operators/jit/more/mix/mix.cc @@ -48,6 +48,27 @@ void VTanh(const T* x, T* y, int n) { compute_addbias(&b, y, y, n); } +void Softmax(const T* x, T* y, int n, int bs) { + auto compute_hmax = Get, platform::CPUPlace>(n); + auto compute_hsum = Get, platform::CPUPlace>(n); + auto compute_vscal = Get, platform::CPUPlace>(n); + auto compute_vaddbias = Get, platform::CPUPlace>(n); + auto compute_vexp = + Get, platform::CPUPlace>(n); + for (int i = 0; i < bs; ++i) { + T scalar; + compute_hmax(x, &scalar, n); + scalar = static_cast(0) - scalar; + compute_vaddbias(&scalar, x, y, n); // x - max + compute_vexp(y, y, n); + compute_hsum(y, &scalar, n); + scalar = static_cast(1) / scalar; + compute_vscal(&scalar, y, y, n); + x += n; + y += n; + } +} + void (*getActFunc(KernelType type, int d))(const T*, T*, int) { // NOLINT if (type == kVSigmoid) { return Get, platform::CPUPlace>(d); @@ -184,6 +205,8 @@ bool VSigmoidKernel::UseMe(const int& d) const { return true; } bool VTanhKernel::UseMe(const int& d) const { return true; } +bool SoftmaxKernel::UseMe(const int& d) const { return true; } + bool LSTMCtHtKernel::UseMe(const lstm_attr_t& attr) const { return true; } bool LSTMC1H1Kernel::UseMe(const lstm_attr_t& attr) const { return true; } @@ -207,6 +230,7 @@ namespace mix = paddle::operators::jit::more::mix; REGISTER_MORE_KERNEL(kVSigmoid, VSigmoid); REGISTER_MORE_KERNEL(kVTanh, VTanh); +REGISTER_MORE_KERNEL(kSoftmax, Softmax); REGISTER_MORE_KERNEL(kLSTMCtHt, LSTMCtHt); REGISTER_MORE_KERNEL(kLSTMC1H1, LSTMC1H1); REGISTER_MORE_KERNEL(kGRUH1, GRUH1); diff --git a/paddle/fluid/operators/jit/more/mix/mix.h b/paddle/fluid/operators/jit/more/mix/mix.h index a70ecdf934..d64af19219 100644 --- a/paddle/fluid/operators/jit/more/mix/mix.h +++ b/paddle/fluid/operators/jit/more/mix/mix.h @@ -26,6 +26,7 @@ using T = float; void VSigmoid(const T* x, T* y, int n); void VTanh(const T* x, T* y, int n); +void Softmax(const T* x, T* y, int n, int bs); void LSTMCtHt(lstm_t* step, const lstm_attr_t* attr); void LSTMC1H1(lstm_t* step, const lstm_attr_t* attr); @@ -45,6 +46,9 @@ void GRUHtPart2(gru_t* step, const gru_attr_t* attr); DECLARE_MORE_KERNEL(VSigmoid, XYNTuples); DECLARE_MORE_KERNEL(VTanh, XYNTuples); +// XRN +DECLARE_MORE_KERNEL(Softmax, SoftmaxTuples); + DECLARE_MORE_KERNEL(LSTMCtHt, LSTMTuples); DECLARE_MORE_KERNEL(LSTMC1H1, LSTMTuples); diff --git a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt index 667c6dfad6..f9e5aea32e 100644 --- a/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt +++ b/paddle/fluid/operators/jit/more/mkl/CMakeLists.txt @@ -12,3 +12,4 @@ USE_JITKERNEL_MORE(kVSquare, mkl) USE_JITKERNEL_MORE(kVSigmoid, mkl) USE_JITKERNEL_MORE(kVTanh, mkl) USE_JITKERNEL_MORE(kSeqPool, mkl) +USE_JITKERNEL_MORE(kSoftmax, mkl) diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc index fccdc68f5e..b13b8638e2 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.cc +++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc @@ -116,6 +116,16 @@ void VAXPY(double a, const double* x, double* y, int n) { platform::dynload::cblas_daxpy(n, a, x, 1, y, 1); } +template <> +void ASum(const float* x, float* res, int n) { + res[0] = platform::dynload::cblas_sasum(n, x, 1); +} + +template <> +void ASum(const double* x, double* res, int n) { + res[0] = platform::dynload::cblas_dasum(n, x, 1); +} + // TODO(TJ): tuning me carefully on AVX, AVX2 and AVX512 template <> bool MatMulKernel::UseMe(const int& d) const { @@ -167,6 +177,11 @@ bool SeqPoolKernel::UseMe(const seq_pool_attr_t& attr) const { return true; } +template <> +bool SoftmaxKernel::UseMe(const int& d) const { + return true; +} + #define AWALYS_USE_ME_WITH_DOUBLE(func) \ template <> \ bool func##Kernel::UseMe(const int& d) const { \ @@ -181,6 +196,7 @@ AWALYS_USE_ME_WITH_DOUBLE(VExp); AWALYS_USE_ME_WITH_DOUBLE(VSigmoid); AWALYS_USE_ME_WITH_DOUBLE(VTanh); AWALYS_USE_ME_WITH_DOUBLE(VSquare); +AWALYS_USE_ME_WITH_DOUBLE(Softmax); #undef AWALYS_USE_ME_WITH_DOUBLE } // namespace mkl @@ -204,5 +220,6 @@ REGISTER_MKL_KERNEL(kVSquare, VSquare); REGISTER_MKL_KERNEL(kVSigmoid, VSigmoid); REGISTER_MKL_KERNEL(kVTanh, VTanh); REGISTER_MKL_KERNEL(kSeqPool, SeqPool); +REGISTER_MKL_KERNEL(kSoftmax, Softmax); #undef REGISTER_MKL_KERNEL diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.h b/paddle/fluid/operators/jit/more/mkl/mkl.h index a27196fa19..6b95b9c872 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.h +++ b/paddle/fluid/operators/jit/more/mkl/mkl.h @@ -16,6 +16,7 @@ #include #include +#include #include "paddle/fluid/operators/jit/kernel_base.h" namespace paddle { @@ -90,6 +91,30 @@ void SeqPool(const T* x, T* y, const seq_pool_attr_t* attr) { } } +template +void ASum(const T* x, T* res, int n); + +template +void Softmax(const T* x, T* y, int n, int bs) { + std::vector entities(bs); + for (int i = 0; i < bs; ++i) { + entities[i] = x[i * n]; + for (int c = 1; c < n; ++c) { + entities[i] = x[i * n + c] > entities[i] ? x[i * n + c] : entities[i]; + } + for (int c = 0; c < n; ++c) { + y[i * n + c] = x[i * n + c] - entities[i]; + } + } + VExp(y, y, n * bs); + for (int i = 0; i < bs; ++i) { + T sum; + ASum(&y[i * n], &sum, n); + sum = static_cast(1) / sum; + VScal(&sum, &y[i * n], &y[i * n], n); + } +} + #define DECLARE_MKL_KERNEL(name, tuples) \ template \ class name##Kernel : public KernelMore> { \ @@ -117,6 +142,8 @@ DECLARE_MKL_KERNEL(VSquare, XYNTuples); DECLARE_MKL_KERNEL(SeqPool, SeqPoolTuples); +DECLARE_MKL_KERNEL(Softmax, SoftmaxTuples); + #undef DECLARE_MKL_KERNEL } // namespace mkl From d59f7335515ac769d8f4d288b7eb32b1669490b2 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Mon, 28 Jan 2019 18:06:56 +0000 Subject: [PATCH 43/46] refine softmax and use with cache test=develop --- paddle/fluid/operators/jit/benchmark.cc | 3 ++ paddle/fluid/operators/jit/gen/act.cc | 28 ++++++++++-- paddle/fluid/operators/jit/helper.h | 22 ++++++++++ paddle/fluid/operators/jit/more/mix/mix.cc | 50 +++++++++++++++++++--- paddle/fluid/operators/jit/more/mkl/mkl.cc | 3 +- paddle/fluid/operators/math/CMakeLists.txt | 2 +- paddle/fluid/operators/math/softmax_impl.h | 28 +++--------- 7 files changed, 102 insertions(+), 34 deletions(-) diff --git a/paddle/fluid/operators/jit/benchmark.cc b/paddle/fluid/operators/jit/benchmark.cc index 383532d8d2..5c5a61f640 100644 --- a/paddle/fluid/operators/jit/benchmark.cc +++ b/paddle/fluid/operators/jit/benchmark.cc @@ -187,6 +187,9 @@ void BenchAXYNKernel() { RandomVec(d, x_data); BenchAllImpls, PlaceType>(d, &a, x.data(), y_data, d); + // test inplace + BenchAllImpls, PlaceType>(d, &a, x.data(), x_data, + d); } } diff --git a/paddle/fluid/operators/jit/gen/act.cc b/paddle/fluid/operators/jit/gen/act.cc index a2a5661b93..e7a7375879 100644 --- a/paddle/fluid/operators/jit/gen/act.cc +++ b/paddle/fluid/operators/jit/gen/act.cc @@ -81,9 +81,7 @@ void VActJitCode::genCode() { #define DECLARE_ACT_CREATOR(name) \ class name##Creator : public JitCodeCreator { \ public: \ - bool UseMe(const int& attr) const override { \ - return platform::MayIUse(platform::avx); \ - } \ + bool UseMe(const int& attr) const override; \ size_t CodeSize(const int& d) const override; \ std::unique_ptr CreateJitCode(const int& attr) const override { \ return make_unique(attr, CodeSize(attr)); \ @@ -98,6 +96,30 @@ DECLARE_ACT_CREATOR(VSigmoid); DECLARE_ACT_CREATOR(VTanh); // TODO(TJ): tuning use me +bool VReluCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VSquareCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VIdentityCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VExpCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx) && d < 32; +} + +bool VSigmoidCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + +bool VTanhCreator::UseMe(const int& d) const { + return platform::MayIUse(platform::avx); +} + size_t VReluCreator::CodeSize(const int& d) const { return 96 /* init size */ + (d / YMM_FLOAT_BLOCK + 3) * 4 /* instructions */ * diff --git a/paddle/fluid/operators/jit/helper.h b/paddle/fluid/operators/jit/helper.h index fbf34fc4b3..7bdc45779b 100644 --- a/paddle/fluid/operators/jit/helper.h +++ b/paddle/fluid/operators/jit/helper.h @@ -118,6 +118,28 @@ typename KernelTuples::func_type Get( return GetRefer(); } +template +class KernelFuncsCache { + public: + KernelFuncsCache() = default; + static KernelFuncsCache& Instance() { + static thread_local KernelFuncsCache g_func_cache; + return g_func_cache; + } + + bool Has(int key) const { return funcs_.find(key) != funcs_.end(); } + + typename KernelTuples::func_type At(int key) { return funcs_.at(key); } + + void Insert(int key, typename KernelTuples::func_type func) { + funcs_.emplace(key, func); + } + + private: + std::unordered_map funcs_; + DISABLE_COPY_AND_ASSIGN(KernelFuncsCache); +}; + const char* to_string(KernelType kt); const char* to_string(SeqPoolType kt); diff --git a/paddle/fluid/operators/jit/more/mix/mix.cc b/paddle/fluid/operators/jit/more/mix/mix.cc index 2a75eb23cd..0f42ac158c 100644 --- a/paddle/fluid/operators/jit/more/mix/mix.cc +++ b/paddle/fluid/operators/jit/more/mix/mix.cc @@ -49,12 +49,50 @@ void VTanh(const T* x, T* y, int n) { } void Softmax(const T* x, T* y, int n, int bs) { - auto compute_hmax = Get, platform::CPUPlace>(n); - auto compute_hsum = Get, platform::CPUPlace>(n); - auto compute_vscal = Get, platform::CPUPlace>(n); - auto compute_vaddbias = Get, platform::CPUPlace>(n); - auto compute_vexp = - Get, platform::CPUPlace>(n); + typename XRNTuples::func_type compute_hmax{nullptr}; + typename XRNTuples::func_type compute_hsum{nullptr}; + typename AXYNTuples::func_type compute_vscal{nullptr}; + typename AXYNTuples::func_type compute_vaddbias{nullptr}; + typename XYNTuples::func_type compute_vexp{nullptr}; + + if (!KernelFuncsCache>::Instance().Has(n)) { + compute_hmax = Get, platform::CPUPlace>(n); + KernelFuncsCache>::Instance().Insert(n, compute_hmax); + } else { + compute_hmax = KernelFuncsCache>::Instance().At(n); + } + + if (!KernelFuncsCache>::Instance().Has(n)) { + compute_hsum = Get, platform::CPUPlace>(n); + KernelFuncsCache>::Instance().Insert(n, compute_hsum); + } else { + compute_hsum = KernelFuncsCache>::Instance().At(n); + } + + if (!KernelFuncsCache>::Instance().Has(n)) { + compute_vscal = Get, platform::CPUPlace>(n); + KernelFuncsCache>::Instance().Insert(n, + compute_vscal); + } else { + compute_vscal = KernelFuncsCache>::Instance().At(n); + } + + if (!KernelFuncsCache>::Instance().Has(n)) { + compute_vaddbias = Get, platform::CPUPlace>(n); + KernelFuncsCache>::Instance().Insert( + n, compute_vaddbias); + } else { + compute_vaddbias = + KernelFuncsCache>::Instance().At(n); + } + + if (!KernelFuncsCache>::Instance().Has(n)) { + compute_vexp = Get, platform::CPUPlace>(n); + KernelFuncsCache>::Instance().Insert(n, compute_vexp); + } else { + compute_vexp = KernelFuncsCache>::Instance().At(n); + } + for (int i = 0; i < bs; ++i) { T scalar; compute_hmax(x, &scalar, n); diff --git a/paddle/fluid/operators/jit/more/mkl/mkl.cc b/paddle/fluid/operators/jit/more/mkl/mkl.cc index b13b8638e2..28a37198da 100644 --- a/paddle/fluid/operators/jit/more/mkl/mkl.cc +++ b/paddle/fluid/operators/jit/more/mkl/mkl.cc @@ -179,7 +179,8 @@ bool SeqPoolKernel::UseMe(const seq_pool_attr_t& attr) const { template <> bool SoftmaxKernel::UseMe(const int& d) const { - return true; + // tuned on avx2 + return platform::MayIUse(platform::avx) && d < 60; } #define AWALYS_USE_ME_WITH_DOUBLE(func) \ diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 6bbb7155dd..e20524012a 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -53,7 +53,7 @@ math_library(sequence2batch) math_library(sequence_padding) math_library(sequence_pooling DEPS math_function jit_kernel_helper) math_library(sequence_scale) -math_library(softmax DEPS math_function) +math_library(softmax DEPS math_function jit_kernel_helper) math_library(beam_search DEPS math_function) math_library(matrix_bit_code) diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 1d9d98b106..1ff9ff684f 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/jit/kernels.h" -#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { namespace math { @@ -81,28 +81,10 @@ class SoftmaxFunctor> { const int kBatchDim = 0; const int kClassDim = 1; // 2D data. Batch x C - const int batch_size = in_dims[kBatchDim]; - const int num_classes = in_dims[kClassDim]; - std::vector entities(batch_size); - auto blas = math::GetBlas(context); - for (int n = 0; n < batch_size; ++n) { - entities[n] = in_data[n * num_classes]; - for (int c = 1; c < num_classes; ++c) { - entities[n] = in_data[n * num_classes + c] > entities[n] - ? in_data[n * num_classes + c] - : entities[n]; - } - for (int c = 0; c < num_classes; ++c) { - out_data[n * num_classes + c] = - in_data[n * num_classes + c] - entities[n]; - } - } - - blas.VEXP(num_classes * batch_size, out_data, out_data); - for (int n = 0; n < batch_size; ++n) { - auto sum = blas.ASUM(num_classes, &out_data[n * num_classes], 1); - blas.SCAL(num_classes, 1.0f / sum, &out_data[n * num_classes]); - } + auto compute_softmax = + jit::Get, platform::CPUPlace>( + in_dims[kClassDim]); + compute_softmax(in_data, out_data, in_dims[kClassDim], in_dims[kBatchDim]); } }; From ab4715840d0da3cde6f024fd5268f4d55701bbba Mon Sep 17 00:00:00 2001 From: Wu Yi Date: Tue, 29 Jan 2019 09:25:16 +0800 Subject: [PATCH 44/46] fix default create_parameter dtype maching initializers (#15521) * fix default create_parameter dtype maching initializers test=develop * update type check test=develop * update test=develop --- python/paddle/fluid/layer_helper.py | 11 +++++++++++ python/paddle/fluid/tests/unittests/test_layers.py | 3 ++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 972c51938f..a172141b3a 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -300,6 +300,17 @@ class LayerHelper(object): attr.name = unique_name.generate(".".join([self.name, suffix])) if default_initializer is None and attr.initializer is None: + if isinstance(dtype, core.VarDesc.VarType): + if dtype != core.VarDesc.VarType.FP32 and \ + dtype != core.VarDesc.VarType.FP64: + raise TypeError( + "Can not create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" + ) + else: + if not (dtype.startswith("float") or dtype == "double"): + raise TypeError( + "Can not create parameter with default initializer when dtype is not float type. Set default_initializer to fit the parameter dtype!" + ) if is_bias: attr._set_default_bias_initializer() else: diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index c13f03e86f..e7bc1601a5 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -58,7 +58,8 @@ class TestBook(unittest.TestCase): def test_simple_conv2d(self): program = Program() with program_guard(program, startup_program=Program()): - images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32') + images = layers.data( + name='pixel', shape=[3, 48, 48], dtype='float32') layers.conv2d(input=images, num_filters=3, filter_size=[4, 4]) print(str(program)) From 655179089f79718b85ebb3fd9f9ea196773ea2f6 Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Tue, 29 Jan 2019 11:36:20 +0800 Subject: [PATCH 45/46] AnalysisConfig remove contrib namespace (#15540) --- paddle/fluid/inference/analysis/argument.h | 2 +- paddle/fluid/inference/analysis/helper.h | 2 +- .../inference/analysis/ir_pass_manager.cc | 2 +- paddle/fluid/inference/api/analysis_config.cc | 50 +++++++++---------- .../fluid/inference/api/analysis_predictor.cc | 9 ++-- .../fluid/inference/api/analysis_predictor.h | 3 +- .../api/analysis_predictor_tester.cc | 1 - paddle/fluid/inference/api/api_impl_tester.cc | 2 +- .../api/demo_ci/trt_mobilenet_demo.cc | 2 +- .../fluid/inference/api/demo_ci/vis_demo.cc | 1 - .../inference/api/paddle_analysis_config.h | 6 --- paddle/fluid/inference/api/paddle_api.h | 2 +- .../inference/tensorrt/trt_int8_calibrator.h | 8 +-- .../tests/api/analyzer_dam_tester.cc | 11 ++-- .../tests/api/analyzer_lac_tester.cc | 2 - .../tests/api/analyzer_mm_dnn_tester.cc | 9 ++-- .../tests/api/analyzer_ner_tester.cc | 9 ++-- .../tests/api/analyzer_pyramid_dnn_tester.cc | 9 ++-- .../tests/api/analyzer_rnn1_tester.cc | 8 +-- .../tests/api/analyzer_vis_tester.cc | 1 - .../inference/tests/api/config_printer.h | 5 +- .../fluid/inference/tests/api/tester_helper.h | 6 +-- .../inference/tests/api/trt_models_tester.cc | 24 ++++----- paddle/fluid/pybind/inference_api.cc | 1 - 24 files changed, 78 insertions(+), 97 deletions(-) diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index a2546ead93..2f31b182af 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -132,7 +132,7 @@ struct Argument { DECL_ARGUMENT_FIELD(tensorrt_workspace_size, TensorRtWorkspaceSize, int); DECL_ARGUMENT_FIELD(tensorrt_min_subgraph_size, TensorRtMinSubgraphSize, int); DECL_ARGUMENT_FIELD(tensorrt_precision_mode, TensorRtPrecisionMode, - contrib::AnalysisConfig::Precision); + AnalysisConfig::Precision); // Memory optimized related. DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool); diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h index 120f6ef27d..59107f2808 100644 --- a/paddle/fluid/inference/analysis/helper.h +++ b/paddle/fluid/inference/analysis/helper.h @@ -32,7 +32,7 @@ limitations under the License. */ #ifdef _WIN32 #include #include -#define GCC_ATTRIBUTE(attr__) ; +#define GCC_ATTRIBUTE(attr__) #define MKDIR(path) _mkdir(path) #else #include diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 99611ce84b..fe3c841186 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -71,7 +71,7 @@ void IRPassManager::CreatePasses(Argument *argument, new framework::ProgramDesc *(&argument->main_program())); bool enable_int8 = argument->tensorrt_precision_mode() == - contrib::AnalysisConfig::Precision::kInt8; + AnalysisConfig::Precision::kInt8; pass->Set("enable_int8", new bool(enable_int8)); std::string model_opt_cache_dir = diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 8efd514bd8..eecab238a8 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -22,7 +22,7 @@ namespace paddle { -PassStrategy *contrib::AnalysisConfig::pass_builder() const { +PassStrategy *AnalysisConfig::pass_builder() const { if (!pass_builder_.get()) { if (use_gpu_) { LOG(INFO) << "Create GPU IR passes"; @@ -42,27 +42,27 @@ PassStrategy *contrib::AnalysisConfig::pass_builder() const { return pass_builder_.get(); } -contrib::AnalysisConfig::AnalysisConfig(const std::string &model_dir) { +AnalysisConfig::AnalysisConfig(const std::string &model_dir) { model_dir_ = model_dir; Update(); } -contrib::AnalysisConfig::AnalysisConfig(const std::string &prog_file, - const std::string ¶ms_file) { +AnalysisConfig::AnalysisConfig(const std::string &prog_file, + const std::string ¶ms_file) { prog_file_ = prog_file; params_file_ = params_file; Update(); } -void contrib::AnalysisConfig::SetModel(const std::string &prog_file_path, - const std::string ¶ms_file_path) { +void AnalysisConfig::SetModel(const std::string &prog_file_path, + const std::string ¶ms_file_path) { prog_file_ = prog_file_path; params_file_ = params_file_path; Update(); } -void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, - int device_id) { +void AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, + int device_id) { #ifdef PADDLE_WITH_CUDA use_gpu_ = true; memory_pool_init_size_mb_ = memory_pool_init_size_mb; @@ -74,13 +74,13 @@ void contrib::AnalysisConfig::EnableUseGpu(uint64_t memory_pool_init_size_mb, Update(); } -void contrib::AnalysisConfig::DisableGpu() { +void AnalysisConfig::DisableGpu() { use_gpu_ = false; Update(); } -contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) { +AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { #define CP_MEMBER(member__) member__ = other.member__; // Model related. @@ -130,7 +130,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) { Update(); } -void contrib::AnalysisConfig::EnableMKLDNN() { +void AnalysisConfig::EnableMKLDNN() { #ifdef PADDLE_WITH_MKLDNN pass_builder()->EnableMKLDNN(); use_mkldnn_ = true; @@ -142,9 +142,9 @@ void contrib::AnalysisConfig::EnableMKLDNN() { Update(); } -void contrib::AnalysisConfig::EnableTensorRtEngine( +void AnalysisConfig::EnableTensorRtEngine( int workspace_size, int max_batch_size, int min_subgraph_size, - contrib::AnalysisConfig::Precision precision_mode) { + AnalysisConfig::Precision precision_mode) { #ifdef PADDLE_WITH_CUDA if (!use_gpu()) { LOG(ERROR) << "To use TensorRT engine, please call EnableGpu() first"; @@ -165,7 +165,7 @@ void contrib::AnalysisConfig::EnableTensorRtEngine( } // TODO(Superjomn) refactor this, buggy. -void contrib::AnalysisConfig::Update() { +void AnalysisConfig::Update() { auto info = SerializeInfoCache(); if (info == serialized_info_cache_) return; @@ -225,7 +225,7 @@ void contrib::AnalysisConfig::Update() { } } -std::string contrib::AnalysisConfig::SerializeInfoCache() { +std::string AnalysisConfig::SerializeInfoCache() { std::stringstream ss; ss << model_dir_; ss << prog_file_; @@ -260,14 +260,14 @@ std::string contrib::AnalysisConfig::SerializeInfoCache() { return ss.str(); } -void contrib::AnalysisConfig::SetCpuMathLibraryNumThreads( +void AnalysisConfig::SetCpuMathLibraryNumThreads( int cpu_math_library_num_threads) { cpu_math_library_num_threads_ = cpu_math_library_num_threads; Update(); } -float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const { +float AnalysisConfig::fraction_of_gpu_memory_for_pool() const { #ifdef PADDLE_WITH_CUDA // Get the GPU memory details and calculate the fraction of memory for the // GPU memory pool. @@ -282,8 +282,8 @@ float contrib::AnalysisConfig::fraction_of_gpu_memory_for_pool() const { #endif } -void contrib::AnalysisConfig::EnableMemoryOptim( - bool static_optim, bool force_update_static_cache) { +void AnalysisConfig::EnableMemoryOptim(bool static_optim, + bool force_update_static_cache) { enable_memory_optim_ = true; static_memory_optim_ = static_optim; static_memory_optim_force_update_ = force_update_static_cache; @@ -291,14 +291,14 @@ void contrib::AnalysisConfig::EnableMemoryOptim( Update(); } -bool contrib::AnalysisConfig::enable_memory_optim() const { +bool AnalysisConfig::enable_memory_optim() const { return enable_memory_optim_; } -void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer, - size_t prog_buffer_size, - const char *param_buffer, - size_t param_buffer_size) { +void AnalysisConfig::SetModelBuffer(const char *prog_buffer, + size_t prog_buffer_size, + const char *param_buffer, + size_t param_buffer_size) { prog_file_ = std::string(prog_buffer, prog_buffer + prog_buffer_size); params_file_ = std::string(param_buffer, param_buffer + param_buffer_size); model_from_memory_ = true; @@ -306,7 +306,7 @@ void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer, Update(); } -NativeConfig contrib::AnalysisConfig::ToNativeConfig() const { +NativeConfig AnalysisConfig::ToNativeConfig() const { NativeConfig config; config.model_dir = model_dir_; config.prog_file = prog_file_; diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 66374cb7f0..14d6ba8c56 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -47,7 +47,6 @@ DECLARE_bool(profile); namespace paddle { -using contrib::AnalysisConfig; using inference::Singleton; #if PADDLE_WITH_TENSORRT using inference::tensorrt::TRTInt8Calibrator; @@ -731,10 +730,10 @@ std::string AnalysisPredictor::GetSeriazlizedProgram() const { } template <> -std::unique_ptr CreatePaddlePredictor( - const contrib::AnalysisConfig &config) { - return CreatePaddlePredictor(config); +std::unique_ptr CreatePaddlePredictor( + const AnalysisConfig &config) { + return CreatePaddlePredictor( + config); } } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h index fa1d0d596d..014df4ee8b 100644 --- a/paddle/fluid/inference/api/analysis_predictor.h +++ b/paddle/fluid/inference/api/analysis_predictor.h @@ -33,7 +33,6 @@ using inference::analysis::Argument; using inference::analysis::Analyzer; using framework::proto::ProgramDesc; using framework::NaiveExecutor; -using contrib::AnalysisConfig; /** \brief This predictor is based on the original native predictor with IR and * Analysis support. @@ -123,7 +122,7 @@ class AnalysisPredictor : public PaddlePredictor { #endif private: - contrib::AnalysisConfig config_; + AnalysisConfig config_; Argument argument_; std::unique_ptr executor_; platform::Place place_; diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc index 20b61344da..6d11b46108 100644 --- a/paddle/fluid/inference/api/analysis_predictor_tester.cc +++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc @@ -24,7 +24,6 @@ DEFINE_string(dirname, "", "dirname to tests."); namespace paddle { -using contrib::AnalysisConfig; TEST(AnalysisPredictor, analysis_off) { AnalysisConfig config; diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc index 54895679ca..e82cb53bf0 100644 --- a/paddle/fluid/inference/api/api_impl_tester.cc +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -295,7 +295,7 @@ TEST(inference_api_native, image_classification_gpu) { #endif TEST(PassBuilder, Delete) { - contrib::AnalysisConfig config; + AnalysisConfig config; config.DisableGpu(); config.pass_builder()->DeletePass("attention_lstm_fuse_pass"); const auto& passes = config.pass_builder()->AllPasses(); diff --git a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc index 338a0cec16..f7da55c9ae 100644 --- a/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc +++ b/paddle/fluid/inference/api/demo_ci/trt_mobilenet_demo.cc @@ -36,7 +36,7 @@ namespace demo { */ void Main() { std::unique_ptr predictor; - paddle::contrib::AnalysisConfig config; + paddle::AnalysisConfig config; config.EnableUseGpu(100, 0); config.SetModel(FLAGS_modeldir + "/__model__", FLAGS_modeldir + "/__params__"); diff --git a/paddle/fluid/inference/api/demo_ci/vis_demo.cc b/paddle/fluid/inference/api/demo_ci/vis_demo.cc index 5320992b7e..0d2c418c56 100644 --- a/paddle/fluid/inference/api/demo_ci/vis_demo.cc +++ b/paddle/fluid/inference/api/demo_ci/vis_demo.cc @@ -34,7 +34,6 @@ DEFINE_bool(use_gpu, false, "Whether use gpu."); namespace paddle { namespace demo { -using contrib::AnalysisConfig; /* * Use the native and analysis fluid engine to inference the demo. */ diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index 5b899b26d6..9d9ed6a39d 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -29,11 +29,6 @@ namespace paddle { class AnalysisPredictor; -// == -// -// ----------------------------------------------------------------------------------- -// NOTE: The following APIs are not mature yet, we are still working on them. -namespace contrib { // NOTE WIP, not stable yet. struct AnalysisConfig { @@ -260,5 +255,4 @@ struct AnalysisConfig { mutable std::unique_ptr pass_builder_; }; -} // namespace contrib } // namespace paddle diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 4069832246..8ac8bc5291 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -221,7 +221,7 @@ class PaddlePredictor { virtual std::string GetSeriazlizedProgram() const { assert(false); // Force raise error. return "NotImplemented"; - }; + } /** The common configs for all the predictors. */ diff --git a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h index 919f5d55f8..5815bc9a14 100644 --- a/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h +++ b/paddle/fluid/inference/tensorrt/trt_int8_calibrator.h @@ -13,16 +13,16 @@ // limitations under the License. #pragma once + +#include +#include #include #include -#include +#include // NOLINT #include #include #include #include - -#include -#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/inference/tensorrt/engine.h" #include "paddle/fluid/platform/place.h" diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc index e78ab942d1..735e4fb563 100644 --- a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc @@ -19,7 +19,6 @@ DEFINE_int32(max_turn_num, 9, namespace paddle { namespace inference { -using contrib::AnalysisConfig; constexpr int32_t kMaxTurnLen = 50; @@ -165,7 +164,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, input_slots->push_back(std::move(response_mask_tensor)); } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model + "/__model__", FLAGS_infer_model + "/param"); cfg->SwitchSpecifyInputNames(); cfg->SwitchIrOptim(true); @@ -187,7 +186,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. void profile(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); if (use_mkldnn) { @@ -223,7 +222,7 @@ TEST(Analyzer_dam, profile_mkldnn) { profile(true /* use_mkldnn */); } // Check the fuse status TEST(Analyzer_dam, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -256,7 +255,7 @@ void compare(bool use_mkldnn = false) { TEST(Analyzer_dam, compare_with_static_memory_optim) { // The small dam will core in CI, but works in local. if (FLAGS_max_turn_num == 9) { - contrib::AnalysisConfig cfg, cfg1; + AnalysisConfig cfg, cfg1; DataRecord data(FLAGS_infer_data, FLAGS_batch_size); std::vector> input_slots_all; @@ -282,7 +281,7 @@ TEST(Analyzer_dam, compare_with_static_memory_optim) { TEST(Analyzer_dam, compare_with_dynamic_memory_optim) { // The small dam will core in CI, but works in local. if (FLAGS_max_turn_num == 9) { - contrib::AnalysisConfig cfg, cfg1; + AnalysisConfig cfg, cfg1; DataRecord data(FLAGS_infer_data, FLAGS_batch_size); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc index b9666e01ad..347672eaae 100644 --- a/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_lac_tester.cc @@ -18,8 +18,6 @@ namespace paddle { namespace inference { namespace analysis { -using contrib::AnalysisConfig; - struct DataRecord { std::vector data; std::vector lod; diff --git a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc index 529a0174c8..089f655c18 100644 --- a/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_mm_dnn_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> query, title; @@ -75,7 +74,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, } } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model); cfg->DisableGpu(); cfg->SwitchSpecifyInputNames(); @@ -95,7 +94,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. void profile(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; @@ -130,7 +129,7 @@ TEST(Analyzer_MM_DNN, profile_mkldnn) { profile(true /* use_mkldnn */); } // Check the fuse status TEST(Analyzer_MM_DNN, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -141,7 +140,7 @@ TEST(Analyzer_MM_DNN, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig void compare(bool use_mkldnn = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); if (use_mkldnn) { diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc index 6fef79dc46..a70aa7a6ac 100644 --- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> word, mention; @@ -76,7 +75,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data) { } } -void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) { +void SetConfig(AnalysisConfig *cfg, bool memory_load = false) { if (memory_load) { std::string buffer_prog, buffer_param; ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog); @@ -105,7 +104,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. void profile(bool memory_load = false) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg, memory_load); std::vector outputs; @@ -136,7 +135,7 @@ TEST(Analyzer_Chinese_ner, profile_memory_load) { // Check the fuse status TEST(Analyzer_Chinese_ner, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -152,7 +151,7 @@ TEST(Analyzer_Chinese_ner, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_Chinese_ner, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc index ad2c46e48d..3f6c933f2b 100644 --- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc @@ -16,7 +16,6 @@ namespace paddle { namespace inference { -using contrib::AnalysisConfig; struct DataRecord { std::vector> query_basic, query_phrase, title_basic, @@ -103,7 +102,7 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, } } -void SetConfig(contrib::AnalysisConfig *cfg) { +void SetConfig(AnalysisConfig *cfg) { cfg->SetModel(FLAGS_infer_model); cfg->DisableGpu(); cfg->SwitchSpecifyInputNames(); @@ -123,7 +122,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. TEST(Analyzer_Pyramid_DNN, profile) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; @@ -147,7 +146,7 @@ TEST(Analyzer_Pyramid_DNN, profile) { // Check the fuse status TEST(Analyzer_Pyramid_DNN, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -158,7 +157,7 @@ TEST(Analyzer_Pyramid_DNN, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_Pyramid_DNN, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc index 22e6366fb5..5ab8577050 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc @@ -223,7 +223,7 @@ void SetInput(std::vector> *inputs) { // Easy for profiling independently. TEST(Analyzer_rnn1, profile) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); cfg.DisableGpu(); cfg.SwitchIrDebug(); @@ -237,7 +237,7 @@ TEST(Analyzer_rnn1, profile) { // Check the fuse status TEST(Analyzer_rnn1, fuse_statis) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); int num_ops; @@ -254,7 +254,7 @@ TEST(Analyzer_rnn1, fuse_statis) { // Compare result of NativeConfig and AnalysisConfig TEST(Analyzer_rnn1, compare) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector> input_slots_all; @@ -276,7 +276,7 @@ TEST(Analyzer_rnn1, compare_determine) { // Test Multi-Thread. TEST(Analyzer_rnn1, multi_thread) { - contrib::AnalysisConfig cfg; + AnalysisConfig cfg; SetConfig(&cfg); std::vector outputs; diff --git a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc index f3e75ffbb5..ca04c1365c 100644 --- a/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_vis_tester.cc @@ -20,7 +20,6 @@ limitations under the License. */ namespace paddle { namespace inference { namespace analysis { -using contrib::AnalysisConfig; struct Record { std::vector data; diff --git a/paddle/fluid/inference/tests/api/config_printer.h b/paddle/fluid/inference/tests/api/config_printer.h index ecc10bafd6..b0c23fbd53 100644 --- a/paddle/fluid/inference/tests/api/config_printer.h +++ b/paddle/fluid/inference/tests/api/config_printer.h @@ -58,9 +58,8 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) { return os; } -std::ostream &operator<<(std::ostream &os, - const contrib::AnalysisConfig &config) { - os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n"; +std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) { + os << GenSpaces(num_spaces) << "AnalysisConfig {\n"; num_spaces++; os << config.ToNativeConfig(); if (!config.model_from_memory()) { diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index b1f7a3464a..c743354e0e 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -65,7 +65,7 @@ float Random(float low, float high) { void PrintConfig(const PaddlePredictor::Config *config, bool use_analysis) { const auto *analysis_config = - reinterpret_cast(config); + reinterpret_cast(config); if (use_analysis) { LOG(INFO) << *analysis_config; return; @@ -109,9 +109,9 @@ void CompareResult(const std::vector &outputs, std::unique_ptr CreateTestPredictor( const PaddlePredictor::Config *config, bool use_analysis = true) { const auto *analysis_config = - reinterpret_cast(config); + reinterpret_cast(config); if (use_analysis) { - return CreatePaddlePredictor(*analysis_config); + return CreatePaddlePredictor(*analysis_config); } auto native_config = analysis_config->ToNativeConfig(); return CreatePaddlePredictor(native_config); diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc index db7109b750..d70b324a4a 100644 --- a/paddle/fluid/inference/tests/api/trt_models_tester.cc +++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc @@ -42,9 +42,9 @@ void SetConfig(ConfigType* config, std::string model_dir, bool use_gpu, } template <> -void SetConfig(contrib::AnalysisConfig* config, - std::string model_dir, bool use_gpu, - bool use_tensorrt, int batch_size) { +void SetConfig(AnalysisConfig* config, std::string model_dir, + bool use_gpu, bool use_tensorrt, + int batch_size) { if (!FLAGS_prog_filename.empty() && !FLAGS_param_filename.empty()) { config->SetModel(model_dir + "/" + FLAGS_prog_filename, model_dir + "/" + FLAGS_param_filename); @@ -75,11 +75,11 @@ void profile(std::string model_dir, bool use_analysis, bool use_tensorrt) { std::vector outputs; if (use_analysis || use_tensorrt) { - contrib::AnalysisConfig config; + AnalysisConfig config; config.EnableUseGpu(100, 0); config.pass_builder()->TurnOnDebug(); - SetConfig(&config, model_dir, true, use_tensorrt, - FLAGS_batch_size); + SetConfig(&config, model_dir, true, use_tensorrt, + FLAGS_batch_size); TestPrediction(reinterpret_cast(&config), inputs_all, &outputs, FLAGS_num_threads, true); } else { @@ -99,18 +99,18 @@ void compare(std::string model_dir, bool use_tensorrt) { SetFakeImageInput(&inputs_all, model_dir, false, "__model__", ""); } - contrib::AnalysisConfig analysis_config; - SetConfig(&analysis_config, model_dir, true, - use_tensorrt, FLAGS_batch_size); + AnalysisConfig analysis_config; + SetConfig(&analysis_config, model_dir, true, use_tensorrt, + FLAGS_batch_size); CompareNativeAndAnalysis( reinterpret_cast(&analysis_config), inputs_all); } void compare_continuous_input(std::string model_dir, bool use_tensorrt) { - contrib::AnalysisConfig analysis_config; - SetConfig(&analysis_config, model_dir, true, - use_tensorrt, FLAGS_batch_size); + AnalysisConfig analysis_config; + SetConfig(&analysis_config, model_dir, true, use_tensorrt, + FLAGS_batch_size); auto config = reinterpret_cast(&analysis_config); auto native_pred = CreateTestPredictor(config, false); diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index e05667d2c7..39e47be606 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -33,7 +33,6 @@ using paddle::PaddlePredictor; using paddle::NativeConfig; using paddle::NativePaddlePredictor; using paddle::AnalysisPredictor; -using paddle::contrib::AnalysisConfig; static void BindPaddleDType(py::module *m); static void BindPaddleBuf(py::module *m); From 5504425eb32d1e2263e5bcf45fa2a3dc5ced0b3c Mon Sep 17 00:00:00 2001 From: luotao1 Date: Tue, 29 Jan 2019 12:09:46 +0800 Subject: [PATCH 46/46] fix compiler error, use len20 dataset for bert test=develop --- .../fluid/inference/tests/api/CMakeLists.txt | 8 +++--- .../tests/api/analyzer_bert_tester.cc | 28 ++++++++----------- .../tests/api/analyzer_rnn1_tester.cc | 1 - 3 files changed, 15 insertions(+), 22 deletions(-) diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index b0f7dcc0df..aa3da397ff 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -128,10 +128,10 @@ inference_analysis_api_test_with_fake_data(test_analyzer_resnet50 inference_analysis_api_test_with_fake_data(test_analyzer_mobilenet_depthwise_conv "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet_depthwise_conv" analyzer_resnet50_tester.cc "mobilenet_model.tar.gz" SERIAL) -# bert -set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert") -download_model_and_data(${BERT_INSTALL_DIR} "bert_model.tar.gz" "bert_data.txt.tar.gz") -inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc) +# bert, max_len=20 +set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert20") +download_model_and_data(${BERT_INSTALL_DIR} "bert_model.tar.gz" "bert_data_len20.txt.tar.gz") +inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc SERIAL) # anakin if (WITH_ANAKIN AND WITH_MKL) # only needed in CI diff --git a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc index 24cbd39ea0..f646fd6d91 100644 --- a/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_bert_tester.cc @@ -18,7 +18,6 @@ namespace paddle { namespace inference { using paddle::PaddleTensor; -using paddle::contrib::AnalysisConfig; template void GetValueFromStream(std::stringstream *ss, T *t) { @@ -158,12 +157,10 @@ bool LoadInputData(std::vector> *inputs) { return true; } -void SetConfig(contrib::AnalysisConfig *config) { - config->SetModel(FLAGS_infer_model); -} +void SetConfig(AnalysisConfig *config) { config->SetModel(FLAGS_infer_model); } void profile(bool use_mkldnn = false) { - contrib::AnalysisConfig config; + AnalysisConfig config; SetConfig(&config); if (use_mkldnn) { @@ -213,17 +210,14 @@ TEST(Analyzer_bert, compare_mkldnn) { compare(true /* use_mkldnn */); } #endif // Compare Deterministic result -// TODO(luotao): Since each unit-test on CI only have 10 minutes, cancel this to -// decrease the CI time. -// TEST(Analyzer_bert, compare_determine) { -// AnalysisConfig cfg; -// SetConfig(&cfg); -// -// std::vector> inputs; -// LoadInputData(&inputs); -// CompareDeterministic(reinterpret_cast(&cfg), -// inputs); -// } +TEST(Analyzer_bert, compare_determine) { + AnalysisConfig cfg; + SetConfig(&cfg); + + std::vector> inputs; + LoadInputData(&inputs); + CompareDeterministic(reinterpret_cast(&cfg), + inputs); +} } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc index 5ab8577050..c27c39f40a 100644 --- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc @@ -20,7 +20,6 @@ namespace paddle { namespace inference { using namespace framework; // NOLINT -using namespace contrib; // NOLINT struct DataRecord { std::vector>> link_step_data_all;