From 593ad763cded0c75e9c300127720005c45343e4b Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Sep 2018 14:55:06 +0800 Subject: [PATCH 01/53] refactor(op): polish generate_proposals_op Polish styles in generate_proposals_op. 1. inline lambda functions rathar than use std::function to save var. 2. add `static inline` to template functions .cc * Make them static to prevent generating symbols. * Make them inline to give compiler a hit inline them as possible. * Not if the function is not static, they cannot be inlined since the symbols should be exported. 3. add `static` to global functions in .cc * Make them static to prevent generating symbols. 4. Use Vector instead manually manange storage between devices. 5. Prefer to use platform::ForRange, so we can optimize `ForRange` by just changing `for_range.h` if it is needed. 6. Do not change shape of inputs test=develop --- .../detection/generate_proposals_op.cc | 194 +++++++++--------- .../detection/generate_proposals_op.cu | 168 ++++++++------- paddle/fluid/operators/gather.h | 6 +- 3 files changed, 190 insertions(+), 178 deletions(-) diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index 818d58ea9e..e9f966b577 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -12,10 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include +#include #include #include #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/gather.h" #include "paddle/fluid/operators/math/math_function.h" @@ -25,21 +27,17 @@ namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; -struct AppendProposalsFunctor { - LoDTensor *out_; - int64_t offset_; - Tensor *to_add_; +static const double kBBoxClipDefault = std::log(1000.0 / 16.0); - AppendProposalsFunctor(LoDTensor *out, int64_t offset, Tensor *to_add) - : out_(out), offset_(offset), to_add_(to_add) {} - - template - void apply() const { - auto *out_data = out_->data(); - auto *to_add_data = to_add_->data(); - memcpy(out_data + offset_, to_add_data, to_add_->numel() * sizeof(T)); - } -}; +static void AppendProposals(Tensor *dst, int64_t offset, const Tensor &src) { + auto *out_data = dst->data(); + auto *to_add_data = src.data(); + size_t size_of_t = framework::SizeOfType(src.type()); + offset *= size_of_t; + std::memcpy( + reinterpret_cast(reinterpret_cast(out_data) + offset), + to_add_data, src.numel() * size_of_t); +} class GenerateProposalsOp : public framework::OperatorWithKernel { public: @@ -75,8 +73,9 @@ class GenerateProposalsOp : public framework::OperatorWithKernel { }; template -void BoxCoder(const platform::DeviceContext &ctx, Tensor *all_anchors, - Tensor *bbox_deltas, Tensor *variances, Tensor *proposals) { +static inline void BoxCoder(const platform::DeviceContext &ctx, + Tensor *all_anchors, Tensor *bbox_deltas, + Tensor *variances, Tensor *proposals) { T *proposals_data = proposals->mutable_data(ctx.GetPlace()); int64_t row = all_anchors->dims()[0]; @@ -108,11 +107,11 @@ void BoxCoder(const platform::DeviceContext &ctx, Tensor *all_anchors, anchor_center_y; bbox_width = std::exp(std::min(variances_data[i * len + 2] * bbox_deltas_data[i * len + 
2], - std::log(1000.0 / 16.0))) * + kBBoxClipDefault)) * anchor_width; bbox_height = std::exp(std::min(variances_data[i * len + 3] * bbox_deltas_data[i * len + 3], - std::log(1000.0 / 16.0))) * + kBBoxClipDefault)) * anchor_height; } else { bbox_center_x = @@ -120,10 +119,10 @@ void BoxCoder(const platform::DeviceContext &ctx, Tensor *all_anchors, bbox_center_y = bbox_deltas_data[i * len + 1] * anchor_height + anchor_center_y; bbox_width = std::exp(std::min(bbox_deltas_data[i * len + 2], - std::log(1000.0 / 16.0))) * + kBBoxClipDefault)) * anchor_width; bbox_height = std::exp(std::min(bbox_deltas_data[i * len + 3], - std::log(1000.0 / 16.0))) * + kBBoxClipDefault)) * anchor_height; } @@ -136,30 +135,32 @@ void BoxCoder(const platform::DeviceContext &ctx, Tensor *all_anchors, } template -void ClipTiledBoxes(const platform::DeviceContext &ctx, const Tensor &im_info, - Tensor *boxes) { +static inline void ClipTiledBoxes(const platform::DeviceContext &ctx, + const Tensor &im_info, Tensor *boxes) { T *boxes_data = boxes->mutable_data(ctx.GetPlace()); const T *im_info_data = im_info.data(); + T zero(0); for (int64_t i = 0; i < boxes->numel(); ++i) { if (i % 4 == 0) { boxes_data[i] = - std::max(std::min(boxes_data[i], im_info_data[1] - 1), 0.0f); + std::max(std::min(boxes_data[i], im_info_data[1] - 1), zero); } else if (i % 4 == 1) { boxes_data[i] = - std::max(std::min(boxes_data[i], im_info_data[0] - 1), 0.0f); + std::max(std::min(boxes_data[i], im_info_data[0] - 1), zero); } else if (i % 4 == 2) { boxes_data[i] = - std::max(std::min(boxes_data[i], im_info_data[1] - 1), 0.0f); + std::max(std::min(boxes_data[i], im_info_data[1] - 1), zero); } else { boxes_data[i] = - std::max(std::min(boxes_data[i], im_info_data[0] - 1), 0.0f); + std::max(std::min(boxes_data[i], im_info_data[0] - 1), zero); } } } template -void FilterBoxes(const platform::DeviceContext &ctx, Tensor *boxes, - float min_size, const Tensor &im_info, Tensor *keep) { +static inline void FilterBoxes(const platform::DeviceContext &ctx, + Tensor *boxes, float min_size, + const Tensor &im_info, Tensor *keep) { const T *im_info_data = im_info.data(); T *boxes_data = boxes->mutable_data(ctx.GetPlace()); T im_scale = im_info_data[2]; @@ -185,24 +186,24 @@ void FilterBoxes(const platform::DeviceContext &ctx, Tensor *boxes, keep->Resize({keep_len}); } -bool SortScorePairDescend(const std::pair &pair1, - const std::pair &pair2) { - return pair1.first > pair2.first; -} - template -void GetMaxScoreIndex(const std::vector &scores, - std::vector> *sorted_indices) { +static inline std::vector> GetSortedScoreIndex( + const std::vector &scores) { + std::vector> sorted_indices; + sorted_indices.reserve(scores.size()); for (size_t i = 0; i < scores.size(); ++i) { - sorted_indices->push_back(std::make_pair(scores[i], i)); + sorted_indices.emplace_back(scores[i], i); } // Sort the score pair according to the scores in descending order - std::stable_sort(sorted_indices->begin(), sorted_indices->end(), - SortScorePairDescend); + std::stable_sort(sorted_indices.begin(), sorted_indices.end(), + [](const std::pair &a, const std::pair &b) { + return a.first < b.first; + }); + return sorted_indices; } template -T BBoxArea(const T *box, const bool normalized) { +static inline T BBoxArea(const T *box, bool normalized) { if (box[2] < box[0] || box[3] < box[1]) { // If coordinate values are is invalid // (e.g. xmax < xmin or ymax < ymin), return 0. 
@@ -220,7 +221,7 @@ T BBoxArea(const T *box, const bool normalized) { } template -T JaccardOverlap(const T *box1, const T *box2, const bool normalized) { +static inline T JaccardOverlap(const T *box1, const T *box2, bool normalized) { if (box2[0] > box1[2] || box2[2] < box1[0] || box2[1] > box1[3] || box2[3] < box1[1]) { return static_cast(0.); @@ -229,8 +230,8 @@ T JaccardOverlap(const T *box1, const T *box2, const bool normalized) { const T inter_ymin = std::max(box1[1], box2[1]); const T inter_xmax = std::min(box1[2], box2[2]); const T inter_ymax = std::min(box1[3], box2[3]); - const T inter_w = std::max(0.0f, inter_xmax - inter_xmin + 1); - const T inter_h = std::max(0.0f, inter_ymax - inter_ymin + 1); + const T inter_w = std::max(T(0), inter_xmax - inter_xmin + 1); + const T inter_h = std::max(T(0), inter_ymax - inter_ymin + 1); const T inter_area = inter_w * inter_h; const T bbox1_area = BBoxArea(box1, normalized); const T bbox2_area = BBoxArea(box2, normalized); @@ -238,9 +239,21 @@ T JaccardOverlap(const T *box1, const T *box2, const bool normalized) { } } +template +static inline Tensor VectorToTensor(const std::vector &selected_indices, + int selected_num) { + Tensor keep_nms; + keep_nms.Resize({selected_num}); + auto *keep_data = keep_nms.mutable_data(platform::CPUPlace()); + for (int i = 0; i < selected_num; ++i) { + keep_data[i] = selected_indices[i]; + } + return keep_nms; +} + template -Tensor NMS(const platform::DeviceContext &ctx, Tensor *bbox, Tensor *scores, - const T nms_threshold, const float eta) { +static inline Tensor NMS(const platform::DeviceContext &ctx, Tensor *bbox, + Tensor *scores, T nms_threshold, float eta) { PADDLE_ENFORCE_NOT_NULL(bbox); int64_t num_boxes = bbox->dims()[0]; // 4: [xmin ymin xmax ymax] @@ -248,20 +261,18 @@ Tensor NMS(const platform::DeviceContext &ctx, Tensor *bbox, Tensor *scores, std::vector scores_data(num_boxes); std::copy_n(scores->data(), num_boxes, scores_data.begin()); - std::vector> sorted_indices; - GetMaxScoreIndex(scores_data, &sorted_indices); + std::vector> sorted_indices = + GetSortedScoreIndex(scores_data); std::vector selected_indices; int selected_num = 0; T adaptive_threshold = nms_threshold; const T *bbox_data = bbox->data(); - bool flag; while (sorted_indices.size() != 0) { - int idx = sorted_indices.front().second; - flag = true; - for (size_t k = 0; k < selected_indices.size(); ++k) { + int idx = sorted_indices.back().second; + bool flag = true; + for (int kept_idx : selected_indices) { if (flag) { - const int kept_idx = selected_indices[k]; T overlap = JaccardOverlap(bbox_data + idx * box_size, bbox_data + kept_idx * box_size, false); flag = (overlap <= adaptive_threshold); @@ -271,32 +282,29 @@ Tensor NMS(const platform::DeviceContext &ctx, Tensor *bbox, Tensor *scores, } if (flag) { selected_indices.push_back(idx); - selected_num++; + ++selected_num; } - sorted_indices.erase(sorted_indices.begin()); + sorted_indices.erase(sorted_indices.end()); if (flag && eta < 1 && adaptive_threshold > 0.5) { adaptive_threshold *= eta; } } - Tensor keep_nms; - keep_nms.Resize({selected_num}); - int *keep_data = keep_nms.mutable_data(ctx.GetPlace()); - for (int i = 0; i < selected_num; ++i) { - keep_data[i] = selected_indices[i]; - } - - return keep_nms; + return VectorToTensor(selected_indices, selected_num); } -template +template class GenerateProposalsKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { auto *scores = context.Input("Scores"); auto *bbox_deltas 
= context.Input("BboxDeltas"); auto *im_info = context.Input("ImInfo"); - auto *anchors = context.Input("Anchors"); - auto *variances = context.Input("Variances"); + auto anchors = detail::Ref(context.Input("Anchors"), + "Cannot find input Anchors(%s) in scope", + context.Inputs("Anchors")[0]); + auto variances = detail::Ref(context.Input("Variances"), + "Cannot find input Variances(%s) in scope", + context.Inputs("Variances")[0]); auto *rpn_rois = context.Output("RpnRois"); auto *rpn_roi_probs = context.Output("RpnRoiProbs"); @@ -307,15 +315,16 @@ class GenerateProposalsKernel : public framework::OpKernel { float min_size = context.Attr("min_size"); float eta = context.Attr("eta"); - auto &dev_ctx = context.template device_context(); + auto &dev_ctx = + context.template device_context(); - auto scores_dim = scores->dims(); + auto &scores_dim = scores->dims(); int64_t num = scores_dim[0]; int64_t c_score = scores_dim[1]; int64_t h_score = scores_dim[2]; int64_t w_score = scores_dim[3]; - auto bbox_dim = bbox_deltas->dims(); + auto &bbox_dim = bbox_deltas->dims(); int64_t c_bbox = bbox_dim[1]; int64_t h_bbox = bbox_dim[2]; int64_t w_bbox = bbox_dim[3]; @@ -330,17 +339,17 @@ class GenerateProposalsKernel : public framework::OpKernel { scores_swap.mutable_data({num, h_score, w_score, c_score}, dev_ctx.GetPlace()); - math::Transpose trans; + math::Transpose trans; std::vector axis = {0, 2, 3, 1}; trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis); trans(dev_ctx, *scores, &scores_swap, axis); framework::LoD lod; - std::vector lod0(1, 0); - Tensor *anchor = const_cast(anchors); - anchor->Resize({anchors->numel() / 4, 4}); - Tensor *var = const_cast(variances); - var->Resize({var->numel() / 4, 4}); + lod.resize(1); + auto &lod0 = lod[0]; + lod0.push_back(0); + anchors.Resize({anchors.numel() / 4, 4}); + variances.Resize({variances.numel() / 4, 4}); int64_t num_proposals = 0; for (int64_t i = 0; i < num; ++i) { @@ -352,24 +361,17 @@ class GenerateProposalsKernel : public framework::OpKernel { scores_slice.Resize({h_score * w_score * c_score, 1}); std::pair tensor_pair = - ProposalForOneImage(dev_ctx, im_info_slice, *anchor, *var, + ProposalForOneImage(dev_ctx, im_info_slice, anchors, variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta); - Tensor proposals = tensor_pair.first; - Tensor scores = tensor_pair.second; - - framework::VisitDataType( - framework::ToDataType(rpn_rois->type()), - AppendProposalsFunctor(rpn_rois, 4 * num_proposals, &proposals)); - framework::VisitDataType( - framework::ToDataType(rpn_roi_probs->type()), - AppendProposalsFunctor(rpn_roi_probs, num_proposals, &scores)); + Tensor &proposals = tensor_pair.first; + Tensor &scores = tensor_pair.second; + AppendProposals(rpn_rois, 4 * num_proposals, proposals); + AppendProposals(rpn_roi_probs, num_proposals, scores); num_proposals += proposals.dims()[0]; - lod0.emplace_back(num_proposals); + lod0.push_back(num_proposals); } - - lod.emplace_back(lod0); rpn_rois->set_lod(lod); rpn_roi_probs->set_lod(lod); rpn_rois->Resize({num_proposals, 4}); @@ -377,7 +379,7 @@ class GenerateProposalsKernel : public framework::OpKernel { } std::pair ProposalForOneImage( - const DeviceContext &ctx, const Tensor &im_info_slice, + const platform::CPUDeviceContext &ctx, const Tensor &im_info_slice, const Tensor &anchors, const Tensor &variances, const Tensor &bbox_deltas_slice, // [M, 4] const Tensor &scores_slice, // [N, 1] @@ -392,10 +394,9 @@ class GenerateProposalsKernel : public 
framework::OpKernel { for (int i = 0; i < scores_slice.numel(); ++i) { index[i] = i; } - std::function compare = - [scores_data](const int64_t &i, const int64_t &j) { - return scores_data[i] > scores_data[j]; - }; + auto compare = [scores_data](const int64_t &i, const int64_t &j) { + return scores_data[i] > scores_data[j]; + }; if (pre_nms_top_n <= 0 || pre_nms_top_n >= scores_slice.numel()) { std::sort(index, index + scores_slice.numel(), compare); @@ -469,12 +470,12 @@ class GenerateProposalsOpMaker : public framework::OpProtoAndCheckerMaker { Generate Proposals OP This operator proposes rois according to each box with their probability to be a foreground object and -the box can be calculated by anchors. Bbox_deltais and scores are the output of RPN. Final proposals +the box can be calculated by anchors. Bbox_details and scores are the output of RPN. Final proposals could be used to train detection net. Scores is the probability for each box to be an object. In format of (N, A, H, W) where N is batch size, A is number of anchors, H and W are height and width of the feature map. -BboxDeltas is the differece between predicted box locatoin and anchor location. In format of (N, 4*A, H, W) +BboxDeltas is the differece between predicted box location and anchor location. In format of (N, 4*A, H, W) For generating proposals, this operator transposes and resizes scores and bbox_deltas in size of (H*W*A, 1) and (H*W*A, 4) and calculate box locations as proposals candidates. Then clip boxes to image and remove predicted boxes with small area. @@ -490,6 +491,5 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(generate_proposals, ops::GenerateProposalsOp, ops::GenerateProposalsOpMaker, paddle::framework::EmptyGradOpMaker); -REGISTER_OP_CPU_KERNEL( - generate_proposals, - ops::GenerateProposalsKernel); +REGISTER_OP_CPU_KERNEL(generate_proposals, ops::GenerateProposalsKernel, + ops::GenerateProposalsKernel); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu index 6146ff509d..efeeecf721 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cu +++ b/paddle/fluid/operators/detection/generate_proposals_op.cu @@ -16,10 +16,13 @@ limitations under the License. 
*/ #include #include #include "cub/cub.cuh" +#include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { @@ -36,36 +39,38 @@ namespace { int const kThreadsPerBlock = sizeof(uint64_t) * 8; -template -__global__ void RangeInitKernel(const T start, const T delta, const int size, - T *out) { - CUDA_1D_KERNEL_LOOP(i, size) { out[i] = start + i * delta; } -} +static const double kBBoxClipDefault = std::log(1000.0 / 16.0); + +struct RangeInitFunctor { + int start_; + int delta_; + int *out_; + __device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; } +}; template -void SortDescending(const platform::CUDADeviceContext &ctx, const Tensor &value, - Tensor *value_out, Tensor *index_out) { - int num = value.numel(); +static void SortDescending(const platform::CUDADeviceContext &ctx, + const Tensor &value, Tensor *value_out, + Tensor *index_out) { + int num = static_cast(value.numel()); Tensor index_in_t; int *idx_in = index_in_t.mutable_data({num}, ctx.GetPlace()); - int block = 512; - auto stream = ctx.stream(); - RangeInitKernel<<>>(0, 1, num, idx_in); + platform::ForRange for_range(ctx, num); + for_range(RangeInitFunctor{0, 1, idx_in}); + int *idx_out = index_out->mutable_data({num}, ctx.GetPlace()); const T *keys_in = value.data(); T *keys_out = value_out->mutable_data({num}, ctx.GetPlace()); // Determine temporary device storage requirements - void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending( - d_temp_storage, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, - num); + nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num); // Allocate temporary storage auto place = boost::get(ctx.GetPlace()); - d_temp_storage = memory::Alloc(place, temp_storage_bytes); + void *d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation cub::DeviceRadixSort::SortPairsDescending( @@ -76,22 +81,27 @@ void SortDescending(const platform::CUDADeviceContext &ctx, const Tensor &value, } template -__device__ __forceinline__ T Min(T x, T y) { - return x < y ? x : y; -} - -template -__device__ __forceinline__ T Max(T x, T y) { - return x > y ? 
x : y; -} - -template -__global__ void BoxDecodeAndClipKernel(const T *anchor, const T *deltas, - const T *var, const int *index, - const T *im_info, const int num, - T *proposals) { - T kBBoxClipDefault = log(1000.0 / 16.0); - CUDA_1D_KERNEL_LOOP(i, num) { +struct BoxDecodeAndClipFunctor { + const T *anchor; + const T *deltas; + const T *var; + const int *index; + const T *im_info; + + T *proposals; + + BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var, + const int *index, const T *im_info, T *proposals) + : anchor(anchor), + deltas(deltas), + var(var), + index(index), + im_info(im_info), + proposals(proposals) {} + + T bbox_clip_default{static_cast(kBBoxClipDefault)}; + + __device__ void operator()(size_t i) { int k = index[i] * 4; T axmin = anchor[k]; T aymin = anchor[k + 1]; @@ -108,17 +118,17 @@ __global__ void BoxDecodeAndClipKernel(const T *anchor, const T *deltas, T dxmax = deltas[k + 2]; T dymax = deltas[k + 3]; - T d_cx = 0., d_cy = 0., d_w = 0., d_h = 0.; + T d_cx, d_cy, d_w, d_h; if (var) { d_cx = cx + dxmin * w * var[k]; d_cy = cy + dymin * h * var[k + 1]; - d_w = exp(Min(dxmax * var[k + 2], kBBoxClipDefault)) * w; - d_h = exp(Min(dymax * var[k + 3], kBBoxClipDefault)) * h; + d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w; + d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h; } else { d_cx = cx + dxmin * w; d_cy = cy + dymin * h; - d_w = exp(Min(dxmax, kBBoxClipDefault)) * w; - d_h = exp(Min(dymax, kBBoxClipDefault)) * h; + d_w = exp(Min(dxmax, bbox_clip_default)) * w; + d_h = exp(Min(dymax, bbox_clip_default)) * h; } T oxmin = d_cx - d_w * 0.5; @@ -126,17 +136,21 @@ __global__ void BoxDecodeAndClipKernel(const T *anchor, const T *deltas, T oxmax = d_cx + d_w * 0.5 - 1.; T oymax = d_cy + d_h * 0.5 - 1.; - proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.); - proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.); - proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.); - proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.); + proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.); + proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.); + proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.); + proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.); } -} + + __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; } + + __device__ __forceinline__ T Max(T a, T b) const { return a > b ? 
a : b; } +}; template -__global__ void FilterBBoxes(const T *bboxes, const T *im_info, - const T min_size, const int num, int *keep_num, - int *keep) { +static __global__ void FilterBBoxes(const T *bboxes, const T *im_info, + const T min_size, const int num, + int *keep_num, int *keep) { T im_h = im_info[0]; T im_w = im_info[1]; T im_scale = im_info[2]; @@ -181,7 +195,7 @@ __global__ void FilterBBoxes(const T *bboxes, const T *im_info, } } -__device__ inline float IoU(const float *a, const float *b) { +static __device__ inline float IoU(const float *a, const float *b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); @@ -191,8 +205,9 @@ __device__ inline float IoU(const float *a, const float *b) { return inter_s / (s_a + s_b - inter_s); } -__global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, - const float *dev_boxes, uint64_t *dev_mask) { +static __global__ void NMSKernel(const int n_boxes, + const float nms_overlap_thresh, + const float *dev_boxes, uint64_t *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; @@ -234,9 +249,9 @@ __global__ void NMSKernel(const int n_boxes, const float nms_overlap_thresh, } template -void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, - const Tensor &sorted_indices, const T nms_threshold, - Tensor *keep_out) { +static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, + const Tensor &sorted_indices, const T nms_threshold, + Tensor *keep_out) { int boxes_num = proposals.dims()[0]; PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]); @@ -247,13 +262,10 @@ void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, const T *boxes = proposals.data(); auto place = boost::get(ctx.GetPlace()); - int size_bytes = boxes_num * col_blocks * sizeof(uint64_t); - uint64_t *d_mask = - reinterpret_cast(memory::Alloc(place, size_bytes)); - NMSKernel<<>>(boxes_num, nms_threshold, boxes, d_mask); - uint64_t *h_mask = reinterpret_cast( - memory::Alloc(platform::CPUPlace(), size_bytes)); - memory::Copy(platform::CPUPlace(), h_mask, place, d_mask, size_bytes, 0); + framework::Vector mask(boxes_num * col_blocks); + NMSKernel<<>>( + boxes_num, nms_threshold, boxes, + mask.CUDAMutableData(boost::get(ctx.GetPlace()))); std::vector remv(col_blocks); memset(&remv[0], 0, sizeof(uint64_t) * col_blocks); @@ -267,7 +279,7 @@ void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, if (!(remv[nblock] & (1ULL << inblock))) { ++num_to_keep; keep_vec.push_back(i); - uint64_t *p = &h_mask[0] + i * col_blocks; + uint64_t *p = &mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } @@ -276,12 +288,10 @@ void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals, int *keep = keep_out->mutable_data({num_to_keep}, ctx.GetPlace()); memory::Copy(place, keep, platform::CPUPlace(), keep_vec.data(), sizeof(int) * num_to_keep, 0); - memory::Free(place, d_mask); - memory::Free(platform::CPUPlace(), h_mask); } template -std::pair ProposalForOneImage( +static std::pair ProposalForOneImage( const platform::CUDADeviceContext &ctx, const Tensor &im_info, const Tensor &anchors, const Tensor &variances, const Tensor &bbox_deltas, // [M, 4] @@ -300,18 +310,20 @@ std::pair ProposalForOneImage( // 2. 
box decode and clipping Tensor proposals; proposals.mutable_data({pre_nms_num, 4}, ctx.GetPlace()); - int block = 512; - auto stream = ctx.stream(); - BoxDecodeAndClipKernel<<>>( - anchors.data(), bbox_deltas.data(), variances.data(), - index_sort.data(), im_info.data(), pre_nms_num, - proposals.data()); + + { + platform::ForRange for_range(ctx, pre_nms_num); + for_range(BoxDecodeAndClipFunctor{ + anchors.data(), bbox_deltas.data(), variances.data(), + index_sort.data(), im_info.data(), proposals.data()}); + } // 3. filter Tensor keep_index, keep_num_t; keep_index.mutable_data({pre_nms_num}, ctx.GetPlace()); keep_num_t.mutable_data({1}, ctx.GetPlace()); min_size = std::max(min_size, 1.0f); + auto stream = ctx.stream(); FilterBBoxes<<<1, 512, 0, stream>>>( proposals.data(), im_info.data(), min_size, pre_nms_num, keep_num_t.data(), keep_index.data()); @@ -355,8 +367,12 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { auto *scores = context.Input("Scores"); auto *bbox_deltas = context.Input("BboxDeltas"); auto *im_info = context.Input("ImInfo"); - auto *anchors = context.Input("Anchors"); - auto *variances = context.Input("Variances"); + auto anchors = detail::Ref(context.Input("Anchors"), + "Cannot find input Anchors(%s) in scope", + context.Inputs("Anchors")[0]); + auto variances = detail::Ref(context.Input("Variances"), + "Cannot find input Variances(%s) in scope", + context.Inputs("Variances")[0]); auto *rpn_rois = context.Output("RpnRois"); auto *rpn_roi_probs = context.Output("RpnRoiProbs"); @@ -392,10 +408,8 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis); trans(dev_ctx, *scores, &scores_swap, axis); - Tensor *anchor = const_cast(anchors); - anchor->Resize({anchors->numel() / 4, 4}); - Tensor *var = const_cast(variances); - var->Resize({var->numel() / 4, 4}); + anchors.Resize({anchors.numel() / 4, 4}); + variances.Resize({variances.numel() / 4, 4}); rpn_rois->mutable_data({bbox_deltas->numel() / 4, 4}, context.GetPlace()); @@ -404,7 +418,7 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { T *rpn_rois_data = rpn_rois->data(); T *rpn_roi_probs_data = rpn_roi_probs->data(); - auto place = boost::get(dev_ctx.GetPlace()); + auto &place = boost::get(dev_ctx.GetPlace()); int64_t num_proposals = 0; std::vector offset(1, 0); @@ -417,12 +431,12 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { scores_slice.Resize({h_score * w_score * c_score, 1}); std::pair box_score_pair = - ProposalForOneImage(dev_ctx, im_info_slice, *anchor, *var, + ProposalForOneImage(dev_ctx, im_info_slice, anchors, variances, bbox_deltas_slice, scores_slice, pre_nms_top_n, post_nms_top_n, nms_thresh, min_size, eta); - Tensor proposals = box_score_pair.first; - Tensor scores = box_score_pair.second; + Tensor &proposals = box_score_pair.first; + Tensor &scores = box_score_pair.second; memory::Copy(place, rpn_rois_data + num_proposals * 4, place, proposals.data(), sizeof(T) * proposals.numel(), 0); diff --git a/paddle/fluid/operators/gather.h b/paddle/fluid/operators/gather.h index d15cb55647..d72e07d76c 100644 --- a/paddle/fluid/operators/gather.h +++ b/paddle/fluid/operators/gather.h @@ -39,11 +39,9 @@ void CPUGather(const platform::DeviceContext& ctx, const Tensor& src, PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace())); // check index of shape 1-D PADDLE_ENFORCE(index.dims().size() == 1); - int index_size = index.dims()[0]; + int64_t index_size = index.dims()[0]; auto src_dims = src.dims(); 
- framework::DDim output_dims(src_dims); - output_dims[0] = index_size; const T* p_src = src.data(); const int* p_index = index.data(); @@ -55,7 +53,7 @@ void CPUGather(const platform::DeviceContext& ctx, const Tensor& src, const size_t slice_bytes = slice_size * sizeof(T); - for (int i = 0; i < index_size; ++i) { + for (int64_t i = 0; i < index_size; ++i) { int index_ = p_index[i]; memcpy(p_output + i * slice_size, p_src + index_ * slice_size, slice_bytes); } From 5207caf58762bdb0d4ee29b83d2cfe406f94d91f Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 17 Oct 2018 19:46:38 +0800 Subject: [PATCH 02/53] core.so do not link libpython test=develop --- paddle/fluid/pybind/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index e7f634c4a6..04fe579a66 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -1,5 +1,5 @@ -set(PYBIND_DEPS pybind python proto_desc memory executor prune feed_fetch_method pass_builder) +set(PYBIND_DEPS pybind proto_desc memory executor prune feed_fetch_method pass_builder) set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc) if(NOT WIN32) list(APPEND PYBIND_DEPS parallel_executor profiler) From 3c249283af8e682ce900eea3a783c0200b639f1c Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Thu, 18 Oct 2018 21:55:28 +0800 Subject: [PATCH 03/53] init seqconv eltadd relu op --- paddle/fluid/operators/CMakeLists.txt | 2 +- .../fusion_seqconv_eltadd_relu_op.cc | 227 ++++++++++++++++++ .../operators/fusion_seqconv_eltadd_relu_op.h | 42 ++++ 3 files changed, 270 insertions(+), 1 deletion(-) create mode 100644 paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc create mode 100644 paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index c97225669a..6c95f4b9c5 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -86,7 +86,7 @@ function(op_library TARGET) # remove windows unsupported op, because windows has no nccl, no warpctc such ops. foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op" "warpctc_op" "hierarchical_sigmoid_op" "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "fusion_gru_op" "lstm_op" "fusion_lstm_op" "cumsum_op" - "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op") + "fusion_seqconv_eltadd_relu_op" "channel_send_op" "channel_create_op" "channel_close_op" "channel_recv_op") if ("${TARGET}" STREQUAL "${windows_unsupport_op}") return() endif() diff --git a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc new file mode 100644 index 0000000000..efeb18e161 --- /dev/null +++ b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc @@ -0,0 +1,227 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h" +#include // for min, max +#include +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/fc_compute.h" + +namespace paddle { +namespace operators { + +void FusionSeqConvEltAddReluOp::InferShape( + framework::InferShapeContext* ctx) const { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of FusionSeqConvEltAddReluOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Filter"), + "Input(Filter) of FusionSeqConvEltAddReluOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Bias"), + "Input(Bias) of FusionSeqConvEltAddReluOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("Out"), + "Output(Out) of FusionSeqConvEltAddReluOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("ColMat"), + "Output(ColMat) of FusionSeqConvEltAddReluOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto w_dims = ctx->GetInputDim("Filter"); + PADDLE_ENFORCE( + ctx->Attrs().Get("contextStride") == 1, + "Currently, FusionSeqConvEltAddReluOp only supports contextStride=1."); + PADDLE_ENFORCE(x_dims.size() == 2 && w_dims.size() == 2, + "Input(X, Filter) should be 2-D tensor."); + PADDLE_ENFORCE(x_dims.size() == 2 && w_dims.size() == 2, + "Input(X, Filter) should be 2-D tensor."); + PADDLE_ENFORCE( + w_dims[0] == ctx->Attrs().Get("contextLength") * x_dims[1], + "Filter's height should be context_length * " + "input_hidden_size ."); + + ctx->SetOutputDim("Out", {x_dims[0], w_dims[1]}); + ctx->SetOutputDim("ColMat", {x_dims[0], w_dims[0]}); + ctx->ShareLoD("X", "Out"); +} + +framework::OpKernelType FusionSeqConvEltAddReluOp::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); +} + +void FusionSeqConvEltAddReluOpMaker::Make() { + AddInput("X", + "(LoDTensor) the input is a LodTensor, which support " + "variable-time length input sequence. The underlying tensor in " + "this LoDTensor is a matrix with shape (T X M), where T is the " + "total time steps in this mini-batch, M is the dim size of x."); + // PaddingData only support false yet, should be ensured at pass. + AddInput("Filter", + "(Tensor) same as the input(Filter) of sequence conv op is an " + "learnable parameter." + "This is a tensor with shape (K, N), where K is the " + "context_length * dim size of x, N is the output feature size."); + AddInput("Bias", + "(Tensor) the learnable weights. shape (1, N), where N is the " + "output feature size"); + AddOutput( + "Out", + "(LoDTensor) the output(Out) is a LodTensor, which support " + "variable-time length output sequence. The underlying tensor in " + "this LoDTensor is a matrix with shape (T, N), where, T is the " + "total time steps in this mini-batch, N is the output feature size."); + AddOutput("ColMat", + "(Tensor) (T, K), where T is where T is the " + "total time steps in this mini-batch, K is height of Filter") + .AsIntermediate(); + AddAttr("contextLength", + "(int) the contextLength of FusionSeqConvEltAddReluOp is the " + "height of the convolution kernel.") + .GreaterThan(0); + AddAttr("contextStart", + "(int, default:0) the contextStart of FusionSeqConvEltAddReluOp " + "represents the beginning of the convolution of the number of " + "rows of sequence, which can be negative. The negative number " + "means to pad contextStart time-steps of zeros or learnable " + "parameters at the beginning of each instance. 
The positive " + "number means to skip contextStart time-steps of each " + "instance.") + .SetDefault(0); + AddAttr( + "contextStride", + "(int, default:1) the contextStride of FusionSeqConvEltAddReluOp " + "represents the stride length of convolution kernel. " + "Currently, FusionSeqConvEltAddReluOp only supports" + "contextStride=1.") + .SetDefault(1) + .GreaterThan(0); + AddComment(R"DOC( +Fusion Sequence Conv and ElementwiseAdd Operator. +)DOC"); +} + +template +class FusionSeqConvEltAddReluKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + using DeviceContext = paddle::platform::CPUDeviceContext; + auto* x = ctx.Input("X"); + auto* w = ctx.Input("Filter"); + auto* b = ctx.Input("Bias"); + auto* y = ctx.Output("Out"); + auto* col = ctx.Output("ColMat"); + + auto x_lod = x->lod(); + auto x_dims = x->dims(); + auto w_dims = w->dims(); + PADDLE_ENFORCE_EQ(b->numel(), w_dims[1], + "bias size should be equal to output feature size."); + PADDLE_ENFORCE_EQ(x_lod.size(), 1UL, + "Only support one level sequence now."); + + const T* x_data = x->data(); + const T* w_data = w->data(); + const T* b_data = b->data(); + T* y_data = y->mutable_data(ctx.GetPlace()); + T* col_data = col->mutable_data(ctx.GetPlace()); + + int context_start = ctx.Attr("contextStart"); + int context_length = ctx.Attr("contextLength"); + int up_pad = std::max(0, -context_start); + int down_pad = std::max(0, context_start + context_length - 1); + // im2col + int src_mat_w = static_cast(x_dims[1]); + int src_mat_w_sz = src_mat_w * sizeof(T); + int col_mat_w = static_cast(w_dims[0]); + int col_mat_w_sz = col_mat_w * sizeof(T); + for (int i = 0; i < static_cast(x_lod[0].size()) - 1; ++i) { + int st = x_lod[0][i]; + int ed = x_lod[0][i + 1]; + const T* src_data = x_data + st * src_mat_w; + T* dst_data = col_data + st * col_mat_w; + int seq_len = ed - st; + if (seq_len > up_pad + down_pad) { + // zero all up_pad + std::memset(dst_data, 0, up_pad * col_mat_w_sz); + // fill up_pad data + dst_data = dst_data + up_pad * src_mat_w; + int copy_size = col_mat_w_sz - up_pad * src_mat_w_sz; + for (int j = 0; j < up_pad; ++j) { + // blas.VCOPY? 
+ std::memcpy(dst_data, src_data, copy_size); + dst_data += (col_mat_w - src_mat_w); + copy_size += src_mat_w_sz; + } + // fill data + for (int j = 0; j < seq_len - up_pad - down_pad; ++j) { + std::memcpy(dst_data, src_data, copy_size); + dst_data += col_mat_w; + src_data += src_mat_w; + } + // zero all down_pad + std::memset(dst_data, 0, down_pad * col_mat_w_sz); + // fill down_pad data + copy_size -= src_mat_w_sz; + for (int j = 0; j < down_pad; ++j) { + std::memcpy(dst_data, src_data, copy_size); + dst_data += col_mat_w; + src_data += src_mat_w; + copy_size -= src_mat_w_sz; + } + } else { + PADDLE_ENFORCE_GE(context_length, up_pad + down_pad + 1); + std::memset(dst_data, 0, seq_len * col_mat_w_sz); + int zero_sz = up_pad * src_mat_w_sz; + int seq_len_size = seq_len * src_mat_w_sz; + for (int j = 0; j < std::min(up_pad, seq_len); ++j) { + int copy_size = std::min(seq_len_size, col_mat_w_sz - zero_sz); + std::memcpy(dst_data + zero_sz / sizeof(T), src_data, copy_size); + dst_data += col_mat_w; + zero_sz -= src_mat_w_sz; + } + zero_sz = down_pad * src_mat_w_sz; + dst_data = col_data + (ed - 1) * col_mat_w; + src_data = x_data + (ed - up_pad - 1) * src_mat_w; + for (int j = 0; j < std::min(0, seq_len - up_pad); ++j) { + int copy_size = std::min(seq_len_size, col_mat_w_sz - zero_sz); + std::memcpy(dst_data, src_data, copy_size); + dst_data -= col_mat_w; + src_data += src_mat_w; + zero_sz -= src_mat_w_sz; + } + } + } + + auto& dev_ctx = ctx.template device_context(); + auto blas = math::GetBlas(dev_ctx); + math::FCCompute(blas, x_dims[0], w_dims[1], w_dims[0], + col_data, w_data, y_data, b_data, true); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(fusion_seqconv_eltadd_relu, ops::FusionSeqConvEltAddReluOp, + ops::FusionSeqConvEltAddReluOpMaker, + paddle::framework::DefaultGradOpDescMaker); + +REGISTER_OP_CPU_KERNEL(fusion_seqconv_eltadd_relu, + ops::FusionSeqConvEltAddReluKernel, + ops::FusionSeqConvEltAddReluKernel); diff --git a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h new file mode 100644 index 0000000000..028d79dc2a --- /dev/null +++ b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.h @@ -0,0 +1,42 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using LoDTensor = framework::LoDTensor; +using Tensor = framework::Tensor; + +class FusionSeqConvEltAddReluOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override; + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override; +}; + +class FusionSeqConvEltAddReluOpMaker + : public framework::OpProtoAndCheckerMaker { + public: + void Make() override; +}; + +} // namespace operators +} // namespace paddle From 7cb19a5976a8c23c34cdea6d86bf3ce7c3c3cc79 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 19 Oct 2018 00:48:43 +0800 Subject: [PATCH 04/53] fuse elementwise_add and relu --- paddle/fluid/operators/math/fc_compute.h | 24 +++-- paddle/fluid/operators/math/jit_kernel.h | 6 ++ .../fluid/operators/math/jit_kernel_blas.cc | 91 +++++++++++++++++++ 3 files changed, 112 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/operators/math/fc_compute.h b/paddle/fluid/operators/math/fc_compute.h index 1f5a49c0ab..2d7e877a77 100644 --- a/paddle/fluid/operators/math/fc_compute.h +++ b/paddle/fluid/operators/math/fc_compute.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/jit_kernel.h" // TODO(TJ): add deps DECLARE_int32(paddle_num_threads); @@ -30,20 +31,25 @@ inline void FCCompute(const BlasT& blas, const int M, if (B == NULL) { return; } + if (relu) { + const auto& vaddrelu = jitkernel::KernelPool::Instance() + .template Get>(N); + for (int i = 0; i < M; i++) { + T* dst = Y + i * N; + vaddrelu->Compute(B, dst, dst); + } + } else { + const auto& vadd = jitkernel::KernelPool::Instance() + .template Get>(N); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for if (FLAGS_paddle_num_threads > 1) #endif - for (int i = 0; i < M; i++) { - blas.AXPY(N, static_cast(1), B, Y + i * N); + for (int i = 0; i < M; i++) { + T* dst = Y + i * N; + vadd->Compute(B, dst, dst); + } } - - if (!relu) { - return; - } - - // TODO(TJ): fuse relu - LOG(FATAL) << "Not implemented!"; } } // namespace math diff --git a/paddle/fluid/operators/math/jit_kernel.h b/paddle/fluid/operators/math/jit_kernel.h index b4dfda6db7..e91e4e8e5a 100644 --- a/paddle/fluid/operators/math/jit_kernel.h +++ b/paddle/fluid/operators/math/jit_kernel.h @@ -86,6 +86,12 @@ class VAddBiasKernel : public Kernel { virtual void Compute(const T a, const T *x, T *y) const = 0; }; +template +class VAddReluKernel : public Kernel { + public: + virtual void Compute(const T *x, const T *y, T *z) const = 0; +}; + template class VActKernel : public Kernel { public: diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index 0f9ea533fc..a486a0ca80 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -378,11 +378,102 @@ class VIdentityKernelImpl : public VIdentityKernel { void Compute(const T* x, T* y) const override {} }; +/* VAddRelu JitKernel */ +template +class VAddReluKernelImpl : public VAddReluKernel { + public: + explicit VAddReluKernelImpl(int d) : VAddReluKernel() { this->num_ = d; } + void Compute(const T* x, const T* y, T* z) const override { + for (int i = 0; i < this->num_; ++i) { + z[i] = x[i] + y[i]; + z[i] = z[i] > 0 ? 
z[i] : 0; + } + } +}; + +#define INTRI8_FLOAT(isa) \ + template <> \ + void VAddReluKernelImpl::Compute( \ + const float* x, const float* y, float* z) const { \ + __m256 tmpx = _mm256_loadu_ps(x); \ + __m256 tmpy = _mm256_loadu_ps(y); \ + tmpy = _mm256_add_ps(tmpx, tmpy); \ + tmpy = _mm256_max_ps(tmpy, _mm256_setzero_ps()); \ + _mm256_storeu_ps(z, tmpy); \ + } + +#define INTRI16_FLOAT(isa) \ + template <> \ + void VAddReluKernelImpl::Compute( \ + const float* x, const float* y, float* z) const { \ + __m256 zeros = _mm256_setzero_ps(); \ + __m256 tmp0 = _mm256_loadu_ps(x); \ + __m256 tmp1 = _mm256_loadu_ps(y); \ + tmp0 = _mm256_add_ps(tmp0, tmp1); \ + tmp0 = _mm256_max_ps(tmp0, zeros); \ + tmp1 = _mm256_loadu_ps(x + 8); \ + __m256 tmp2 = _mm256_loadu_ps(y + 8); \ + tmp1 = _mm256_add_ps(tmp1, tmp2); \ + tmp1 = _mm256_max_ps(tmp1, zeros); \ + _mm256_storeu_ps(z, tmp0); \ + _mm256_storeu_ps(z + 8, tmp1); \ + } + +#define INTRI_COMMON_FLOAT(isa, block) \ + template <> \ + VAddReluKernelImpl::VAddReluKernelImpl(int d) \ + : VAddReluKernel() { \ + this->num_ = d; \ + this->end_ = d - d % AVX_FLOAT_BLOCK; \ + this->rest_ = d - this->end_; \ + } \ + template <> \ + void VAddReluKernelImpl::Compute( \ + const float* x, const float* y, float* z) const { \ + __m256 zeros = _mm256_setzero_ps(); \ + for (int i = 0; i < this->end_; i += AVX_FLOAT_BLOCK) { \ + __m256 tmpx = _mm256_loadu_ps(x + i); \ + __m256 tmpy = _mm256_loadu_ps(y + i); \ + tmpy = _mm256_add_ps(tmpx, tmpy); \ + tmpy = _mm256_max_ps(tmpy, zeros); \ + _mm256_storeu_ps(z + i, tmpy); \ + } \ + for (int i = this->end_; i < this->num_; ++i) { \ + z[i] = x[i] + y[i]; \ + z[i] = z[i] > 0 ? z[i] : 0; \ + } \ + } + +#ifdef __AVX__ +INTRI8_FLOAT(jit::avx); +INTRI16_FLOAT(jit::avx); +INTRI_COMMON_FLOAT(jit::avx, kGT8LT16); +INTRI_COMMON_FLOAT(jit::avx, kGT16); +#endif +#ifdef __AVX2__ +INTRI8_FLOAT(jit::avx2); +INTRI16_FLOAT(jit::avx2); +INTRI_COMMON_FLOAT(jit::avx2, kGT8LT16); +INTRI_COMMON_FLOAT(jit::avx2, kGT16); +#endif +#ifdef __AVX512F__ +// TODO(TJ): refine avx512 +INTRI8_FLOAT(jit::avx512f); +INTRI16_FLOAT(jit::avx512f); +INTRI_COMMON_FLOAT(jit::avx512f, kGT8LT16); +INTRI_COMMON_FLOAT(jit::avx512f, kGT16); +#endif + +#undef INTRI8_FLOAT +#undef INTRI16_FLOAT +#undef INTRI_COMMON_FLOAT + REGISTER_JITKERNEL(vmul, VMulKernel); REGISTER_JITKERNEL(vadd, VAddKernel); REGISTER_JITKERNEL(vscal, VScalKernel); REGISTER_JITKERNEL(vaddb, VAddBiasKernel); REGISTER_JITKERNEL(vrelu, VReluKernel); +REGISTER_JITKERNEL(vaddrelu, VAddReluKernel); REGISTER_JITKERNEL(videntity, VIdentityKernel); } // namespace jitkernel From e5ce9659522553e373227d760a1b993dfe337e44 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 19 Oct 2018 11:09:33 +0800 Subject: [PATCH 05/53] refine and add eltadd_relu unit test --- paddle/fluid/operators/math/fc_compute.h | 2 +- .../fluid/operators/math/jit_kernel_blas.cc | 3 - .../fluid/operators/math/jit_kernel_test.cc | 57 +++++++++++++++++++ 3 files changed, 58 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/operators/math/fc_compute.h b/paddle/fluid/operators/math/fc_compute.h index 2d7e877a77..87220d4019 100644 --- a/paddle/fluid/operators/math/fc_compute.h +++ b/paddle/fluid/operators/math/fc_compute.h @@ -15,7 +15,7 @@ limitations under the License. 
*/ #pragma once #include "paddle/fluid/operators/math/blas.h" -#include "paddle/fluid/operators/math/jit_kernel.h" // TODO(TJ): add deps +#include "paddle/fluid/operators/math/jit_kernel.h" DECLARE_int32(paddle_num_threads); diff --git a/paddle/fluid/operators/math/jit_kernel_blas.cc b/paddle/fluid/operators/math/jit_kernel_blas.cc index a486a0ca80..c88b17b012 100644 --- a/paddle/fluid/operators/math/jit_kernel_blas.cc +++ b/paddle/fluid/operators/math/jit_kernel_blas.cc @@ -447,20 +447,17 @@ class VAddReluKernelImpl : public VAddReluKernel { #ifdef __AVX__ INTRI8_FLOAT(jit::avx); INTRI16_FLOAT(jit::avx); -INTRI_COMMON_FLOAT(jit::avx, kGT8LT16); INTRI_COMMON_FLOAT(jit::avx, kGT16); #endif #ifdef __AVX2__ INTRI8_FLOAT(jit::avx2); INTRI16_FLOAT(jit::avx2); -INTRI_COMMON_FLOAT(jit::avx2, kGT8LT16); INTRI_COMMON_FLOAT(jit::avx2, kGT16); #endif #ifdef __AVX512F__ // TODO(TJ): refine avx512 INTRI8_FLOAT(jit::avx512f); INTRI16_FLOAT(jit::avx512f); -INTRI_COMMON_FLOAT(jit::avx512f, kGT8LT16); INTRI_COMMON_FLOAT(jit::avx512f, kGT16); #endif diff --git a/paddle/fluid/operators/math/jit_kernel_test.cc b/paddle/fluid/operators/math/jit_kernel_test.cc index 7fdd1c6b76..c9e6ab740d 100644 --- a/paddle/fluid/operators/math/jit_kernel_test.cc +++ b/paddle/fluid/operators/math/jit_kernel_test.cc @@ -712,6 +712,63 @@ TEST(JitKernel, vadd) { } } +void vaddrelu_ref(const int n, const float* x, const float* y, float* z) { + for (int i = 0; i < n; ++i) { + z[i] = x[i] + y[i]; + z[i] = z[i] > 0 ? z[i] : 0; + } +} +void vaddrelu_better( + const std::shared_ptr< + const paddle::operators::math::jitkernel::VAddKernel>& vadd, + const std::shared_ptr< + const paddle::operators::math::jitkernel::VReluKernel>& vrelu, + const float* x, const float* y, float* z) { + vadd->Compute(x, y, z); + vrelu->Compute(z, z); +} + +TEST(JitKernel, vaddrelu) { + namespace jit = paddle::operators::math::jitkernel; + for (int d : {7, 8, 15, 16, 30, 256, 512}) { + std::vector x(d), y(d); + std::vector zref(d), ztgt(d); + RandomVec(d, x.data()); + RandomVec(d, y.data()); + const auto& ker = + jit::KernelPool::Instance().template Get>(d); + const auto& vadd = + jit::KernelPool::Instance().template Get>(d); + const auto& vrelu = + jit::KernelPool::Instance().template Get>(d); + const float* x_data = x.data(); + const float* y_data = y.data(); + float* ztgt_data = ztgt.data(); + float* zref_data = zref.data(); + auto trefs = GetCurrentUS(); + for (int i = 0; i < repeat; ++i) { + vadd_ref(d, x_data, y_data, zref_data); + } + auto trefe = GetCurrentUS(); + auto tmkls = GetCurrentUS(); + for (int i = 0; i < repeat; ++i) { + vaddrelu_better(vadd, vrelu, x_data, y_data, zref_data); + } + auto tmkle = GetCurrentUS(); + auto ttgts = GetCurrentUS(); + for (int i = 0; i < repeat; ++i) { + ker->Compute(x_data, y_data, ztgt_data); + } + auto ttgte = GetCurrentUS(); + VLOG(3) << "Vec size " << d << ": refer takes: " << (trefe - trefs) / repeat + << " us, better takes: " << (tmkle - tmkls) / repeat << " us, " + << "tgt takes: " << (ttgte - ttgts) / repeat; + for (int i = 0; i < d; ++i) { + EXPECT_NEAR(ztgt_data[i], zref_data[i], 1e-3); + } + } +} + TEST(JitKernel, pool) { namespace jit = paddle::operators::math::jitkernel; const int frame_size = 4; From 726b91e471c15ce8caba16f1edfebccfd06a5589 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 19 Oct 2018 15:11:41 +0800 Subject: [PATCH 06/53] update --- cmake/generic.cmake | 7 +++++++ paddle/fluid/pybind/CMakeLists.txt | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git 
a/cmake/generic.cmake b/cmake/generic.cmake index 5bf82b4ddf..a610c7964c 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -261,6 +261,13 @@ function(cc_library TARGET_NAME) add_dependencies(${TARGET_NAME} mklml) target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed") endif() + # remove link to python, see notes at: + # https://github.com/pybind/pybind11/blob/master/docs/compiling.rst#building-manually + if("${cc_library_DEPS};" MATCHES "python;") + list(REMOVE_ITEM cc_library_DEPS python) + add_dependencies(${TARGET_NAME} python) + target_link_libraries(${TARGET_NAME} "-Wl,-undefined,dynamic_lookup") + endif() target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) add_dependencies(${TARGET_NAME} ${cc_library_DEPS}) endif() diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 04fe579a66..e7f634c4a6 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -1,5 +1,5 @@ -set(PYBIND_DEPS pybind proto_desc memory executor prune feed_fetch_method pass_builder) +set(PYBIND_DEPS pybind python proto_desc memory executor prune feed_fetch_method pass_builder) set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc) if(NOT WIN32) list(APPEND PYBIND_DEPS parallel_executor profiler) From 8e0b9496de28c0c858c1876831d0117d9f5b110a Mon Sep 17 00:00:00 2001 From: Dang Qingqing Date: Fri, 19 Oct 2018 17:06:45 +0800 Subject: [PATCH 07/53] Fix unit test test=develop --- paddle/fluid/operators/detection/generate_proposals_op.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu index efeeecf721..91213b3c4d 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cu +++ b/paddle/fluid/operators/detection/generate_proposals_op.cu @@ -418,7 +418,7 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { T *rpn_rois_data = rpn_rois->data(); T *rpn_roi_probs_data = rpn_roi_probs->data(); - auto &place = boost::get(dev_ctx.GetPlace()); + auto place = boost::get(dev_ctx.GetPlace()); int64_t num_proposals = 0; std::vector offset(1, 0); From 339e655aeccba6bb109b3ec854e3a57296f558b5 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 19 Oct 2018 16:03:06 +0800 Subject: [PATCH 08/53] refine and add seqconv elementwiseadd relu op test --- .../fusion_seqconv_eltadd_relu_op.cc | 40 ++++---- .../test_fusion_seqconv_eltadd_relu_op.py | 94 ++++++++++++++++++ .../fluid/tests/unittests/test_seq_conv.py | 99 +++++++++---------- 3 files changed, 164 insertions(+), 69 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py diff --git a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc index efeb18e161..b0910dc19e 100644 --- a/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc +++ b/paddle/fluid/operators/fusion_seqconv_eltadd_relu_op.cc @@ -40,6 +40,7 @@ void FusionSeqConvEltAddReluOp::InferShape( auto x_dims = ctx->GetInputDim("X"); auto w_dims = ctx->GetInputDim("Filter"); + int context_length = ctx->Attrs().Get("contextLength"); PADDLE_ENFORCE( ctx->Attrs().Get("contextStride") == 1, "Currently, FusionSeqConvEltAddReluOp only supports contextStride=1."); @@ -47,10 +48,11 @@ void FusionSeqConvEltAddReluOp::InferShape( "Input(X, Filter) should be 2-D tensor."); PADDLE_ENFORCE(x_dims.size() == 2 && w_dims.size() == 2, "Input(X, Filter) 
should be 2-D tensor."); - PADDLE_ENFORCE( - w_dims[0] == ctx->Attrs().Get("contextLength") * x_dims[1], - "Filter's height should be context_length * " - "input_hidden_size ."); + PADDLE_ENFORCE(w_dims[0] == context_length * x_dims[1], + "Filter's height should be context_length * " + "input_hidden_size ."); + PADDLE_ENFORCE_GT(context_length + ctx->Attrs().Get("contextStart"), 0, + "contextStart size should be smaller than contextLength."); ctx->SetOutputDim("Out", {x_dims[0], w_dims[1]}); ctx->SetOutputDim("ColMat", {x_dims[0], w_dims[0]}); @@ -156,9 +158,8 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { T* dst_data = col_data + st * col_mat_w; int seq_len = ed - st; if (seq_len > up_pad + down_pad) { - // zero all up_pad + // zero all up_pad and fill data std::memset(dst_data, 0, up_pad * col_mat_w_sz); - // fill up_pad data dst_data = dst_data + up_pad * src_mat_w; int copy_size = col_mat_w_sz - up_pad * src_mat_w_sz; for (int j = 0; j < up_pad; ++j) { @@ -173,9 +174,8 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { dst_data += col_mat_w; src_data += src_mat_w; } - // zero all down_pad + // zero all down_pad and fill data std::memset(dst_data, 0, down_pad * col_mat_w_sz); - // fill down_pad data copy_size -= src_mat_w_sz; for (int j = 0; j < down_pad; ++j) { std::memcpy(dst_data, src_data, copy_size); @@ -186,27 +186,29 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { } else { PADDLE_ENFORCE_GE(context_length, up_pad + down_pad + 1); std::memset(dst_data, 0, seq_len * col_mat_w_sz); + dst_data = dst_data + up_pad * src_mat_w; int zero_sz = up_pad * src_mat_w_sz; - int seq_len_size = seq_len * src_mat_w_sz; + int cur_src_sz = seq_len * src_mat_w_sz; for (int j = 0; j < std::min(up_pad, seq_len); ++j) { - int copy_size = std::min(seq_len_size, col_mat_w_sz - zero_sz); - std::memcpy(dst_data + zero_sz / sizeof(T), src_data, copy_size); - dst_data += col_mat_w; + int copy_size = std::min(cur_src_sz, col_mat_w_sz - zero_sz); + std::memcpy(dst_data, src_data, copy_size); + dst_data += (col_mat_w - src_mat_w); zero_sz -= src_mat_w_sz; } + // from bottom + dst_data = col_data + ed * col_mat_w; + src_data = x_data + st * src_mat_w; zero_sz = down_pad * src_mat_w_sz; - dst_data = col_data + (ed - 1) * col_mat_w; - src_data = x_data + (ed - up_pad - 1) * src_mat_w; - for (int j = 0; j < std::min(0, seq_len - up_pad); ++j) { - int copy_size = std::min(seq_len_size, col_mat_w_sz - zero_sz); - std::memcpy(dst_data, src_data, copy_size); + for (int j = 1; j <= std::min(down_pad, seq_len); ++j) { + int copy_size = std::min(cur_src_sz, col_mat_w_sz - zero_sz); + std::memcpy(dst_data - (zero_sz + copy_size) / sizeof(T), + src_data + std::max(seq_len - j - up_pad, 0) * src_mat_w, + copy_size); dst_data -= col_mat_w; - src_data += src_mat_w; zero_sz -= src_mat_w_sz; } } } - auto& dev_ctx = ctx.template device_context(); auto blas = math::GetBlas(dev_ctx); math::FCCompute(blas, x_dims[0], w_dims[1], w_dims[0], diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py new file mode 100644 index 0000000000..ba6f1415b1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fusion_seqconv_eltadd_relu_op.py @@ -0,0 +1,94 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import random +from op_test import OpTest +from test_seq_conv import seqconv + + +class TestSeqConvEltAddRelu(OpTest): + def set_conf(self): + pass + + def setUp(self): + self.op_type = 'fusion_seqconv_eltadd_relu' + self.lod = [[6, 4]] + self.in_fea_size = 16 + self.out_fea_size = 8 + self.context_length = 4 + self.context_stride = 1 + self.context_start = 0 + self.set_conf() + + assert self.context_stride == 1 + + T = sum(self.lod[0]) + x = np.random.uniform(-1, 1, [T, self.in_fea_size]).astype('float32') + w = np.random.uniform( + -1, 1, [self.in_fea_size * self.context_length, + self.out_fea_size]).astype('float32') + b = np.random.uniform(-2, 1, [1, self.out_fea_size]).astype('float32') + out = seqconv(x, self.lod, w, self.context_length, self.context_start) + out = np.maximum(out + b, 0) + + self.inputs = {'X': (x, self.lod), 'Filter': w, 'Bias': b} + self.attrs = { + 'contextStart': self.context_start, + 'contextLength': self.context_length, + 'contextStride': self.context_stride + } + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + +class TestSeqConvEltAddReluBS1(TestSeqConvEltAddRelu): + def set_conf(self): + self.lod = [[10]] + + +class TestSeqConvEltAddReluBS1Case2(TestSeqConvEltAddRelu): + def set_conf(self): + self.lod = [[2]] + + +class TestSeqConvEltAddReluCase1(TestSeqConvEltAddRelu): + def set_conf(self): + self.lod = [[3, 5, 1, 6]] + self.context_length = 3 + self.context_start = -2 + + +class TestSeqConvEltAddReluCase2(TestSeqConvEltAddRelu): + def set_conf(self): + self.lod = [[10, 1, 2, 4, 1, 5, 6]] + self.in_fea_size = 2 + self.context_length = 4 + self.context_start = -1 + + +class TestSeqConvEltAddReluCase3(TestSeqConvEltAddRelu): + def set_conf(self): + self.lod = [[10, 1, 2, 4, 1, 5, 6]] + self.context_length = 5 + self.context_start = -4 + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_seq_conv.py b/python/paddle/fluid/tests/unittests/test_seq_conv.py index dcc86382e5..2285e94967 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_conv.py +++ b/python/paddle/fluid/tests/unittests/test_seq_conv.py @@ -20,6 +20,53 @@ import random from op_test import OpTest +def seqconv(x, + lod, + filter, + context_length, + context_start, + padding_trainable=False, + padding_data=None): + [T, M] = x.shape + col = np.zeros((T, context_length * M)).astype('float32') + offset = [0] + for seq_len in lod[0]: + offset.append(offset[-1] + seq_len) + begin_pad = np.max([0, -context_start]) + for i in range(len(offset) - 1): + for j in range(context_length): + in_begin = offset[i] + context_start + j + in_end = offset[i + 1] + context_start + j + out_begin = offset[i] + out_end = offset[i + 1] + if in_begin < offset[i]: + pad_size = np.min( + [offset[i] - in_begin, offset[i + 1] - offset[i]]) + if padding_trainable: + sub_w = padding_data[j:j + pad_size, :] + col[offset[i]:offset[i] + pad_size, j * M:(j + 1) * + M] = sub_w + out_begin = offset[i] + pad_size + in_begin = offset[i] + + if in_end > 
offset[i + 1]: + pad_size = np.min( + [in_end - offset[i + 1], offset[i + 1] - offset[i]]) + if padding_trainable: + sub_w = padding_data[begin_pad + context_start + j - + pad_size:begin_pad + context_start + + j, :] + col[offset[i + 1] - pad_size:offset[i + 1], j * M:(j + 1) * + M] = sub_w + in_end = offset[i + 1] + out_end = offset[i + 1] - pad_size + if in_end <= in_begin: + continue + in_sub = x[in_begin:in_end, :] + col[out_begin:out_end, j * M:(j + 1) * M] += in_sub + return np.dot(col, filter) + + class TestSeqProject(OpTest): def setUp(self): self.init_test_case() @@ -66,57 +113,9 @@ class TestSeqProject(OpTest): 'paddingTrainable': self.padding_trainable, 'contextStride': self.context_stride } - out = np.zeros( - (self.input_size[0], self.output_represention)).astype('float32') + out = seqconv(x, self.lod, w, self.context_length, self.context_start, + self.padding_trainable, self.pad_data) self.outputs = {'Out': out} - self.compute() - - def compute(self): - x, lod = self.inputs['X'] - filter = self.inputs['Filter'] - pading_data = self.pad_data - out = np.zeros((self.input_size[0], self.context_length * - self.input_size[1])).astype('float32') - offset = [0] - for seq_len in lod[0]: - offset.append(offset[-1] + seq_len) - begin_pad = np.max([0, -self.context_start]) - - for i in range(len(offset) - 1): - for j in range(self.context_length): - in_begin = offset[i] + self.context_start + j - in_end = offset[i + 1] + self.context_start + j - out_begin = offset[i] - out_end = offset[i + 1] - if in_begin < offset[i]: - pad_size = np.min( - [offset[i] - in_begin, offset[i + 1] - offset[i]]) - if self.padding_trainable: - sub_w = pading_data[j:j + pad_size, :] - out[offset[i]:offset[i] + pad_size, j * self.input_size[ - 1]:(j + 1) * self.input_size[1]] = sub_w - out_begin = offset[i] + pad_size - in_begin = offset[i] - - if in_end > offset[i + 1]: - pad_size = np.min( - [in_end - offset[i + 1], offset[i + 1] - offset[i]]) - if self.padding_trainable: - sub_w = pading_data[begin_pad + self.context_start + j - - pad_size:begin_pad + - self.context_start + j, :] - out[offset[i + 1] - pad_size:offset[i + 1], j * self. 
- input_size[1]:(j + 1) * self.input_size[1]] = sub_w - in_end = offset[i + 1] - out_end = offset[i + 1] - pad_size - if in_end <= in_begin: - continue - - in_sub = x[in_begin:in_end, :] - out[out_begin:out_end, j * self.input_size[1]:(j + 1) * - self.input_size[1]] += in_sub - - np.dot(out, filter, out=self.outputs['Out']) def test_check_output(self): self.check_output() From 603ba5e01d71cf237e6506ea1e83a4d52a3c0ccc Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Fri, 19 Oct 2018 22:38:41 +0800 Subject: [PATCH 09/53] add seqconv eltadd relu pass --- paddle/fluid/framework/ir/CMakeLists.txt | 1 + .../framework/ir/graph_pattern_detector.cc | 50 +++++++++ .../framework/ir/graph_pattern_detector.h | 25 +++++ .../ir/seqconv_eltadd_relu_fuse_pass.cc | 101 ++++++++++++++++++ .../ir/seqconv_eltadd_relu_fuse_pass.h | 38 +++++++ paddle/fluid/inference/analysis/analyzer.h | 23 ++-- 6 files changed, 227 insertions(+), 11 deletions(-) create mode 100644 paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc create mode 100644 paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index abab290e7d..d2429d5b20 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -37,6 +37,7 @@ pass_library(embedding_fc_lstm_fuse_pass inference) pass_library(fc_gru_fuse_pass inference) pass_library(seq_concat_fc_fuse_pass inference) pass_library(conv_bn_fuse_pass inference) +pass_library(seqconv_eltadd_relu_fuse_pass inference) if(WITH_MKLDNN) pass_library(mkldnn_placement_pass base) pass_library(conv_relu_mkldnn_fuse_pass inference) diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 4664953c63..0674670971 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -349,6 +349,11 @@ PDNode *PDNode::assert_is_op() { return this; } +// PDNode *PDNode::assert_op_attr() { +// asserts_.emplace_back([](Node *x) { return x && x->IsOp(); }); +// return this; +// } + PDNode *PDNode::assert_is_op(const std::string &op_type) { asserts_.emplace_back([op_type](Node *x) { return x && x->IsOp() && x->Op()->Type() == op_type; @@ -761,6 +766,51 @@ PDNode *patterns::ConvReLU::operator()( return relu_out_var; } +PDNode *patterns::SeqConvEltAddRelu::operator()( + paddle::framework::ir::PDNode *seqconv_input) { + // Create Operators + seqconv_input->assert_is_op_input("sequence_conv", "X"); + auto *seqconv_op = + pattern->NewNode(seqconv_repr())->assert_is_op("sequence_conv"); + // ->assert_op_attr("paddingTrainable", false) + // ->assert_op_attr("contextStride", 1) + + auto *eltadd_op = + pattern->NewNode(eltadd_repr())->assert_is_op("elementwise_add"); + auto *relu_op = pattern->NewNode(relu_repr())->assert_is_op("relu"); + // Create variables + // Filter + auto *seqconv_weight_var = + pattern->NewNode(seqconv_weight_repr()) + ->AsInput() + ->assert_is_persistable_var() + ->assert_is_op_input("sequence_conv", "Filter"); + // Bias + auto *eltadd_bias_var = pattern->NewNode(eltadd_bias_repr()) + ->AsInput() + ->assert_is_op_input("elementwise_add"); + // intermediate variable, will be removed in the IR after fuse. 
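+  // Both seqconv_out and eltadd_out below are created AsIntermediate: the
+  // fuse handler gathers them (together with the matched ops) into
+  // marked_nodes and erases them via GraphSafeRemoveNodes, so only relu_out
+  // survives as the fused op's "Out".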
+ auto *seqconv_out_var = pattern->NewNode(seqconv_out_repr()) + ->AsIntermediate() + ->assert_is_only_output_of_op("sequence_conv") + ->assert_is_op_input("elementwise_add"); + auto *eltadd_out_var = pattern->NewNode(eltadd_out_repr()) + ->AsIntermediate() + ->assert_is_only_output_of_op("elementwise_add") + ->assert_is_only_input_of_op("relu"); + // output + auto *relu_out_var = pattern->NewNode(relu_out_repr()) + ->AsOutput() + ->assert_is_op_output("relu"); + + seqconv_op->LinksFrom({seqconv_input, seqconv_weight_var}) + .LinksTo({seqconv_out_var}); + eltadd_op->LinksFrom({seqconv_out_var, eltadd_bias_var}) + .LinksTo({eltadd_out_var}); + relu_op->LinksFrom({eltadd_out_var}).LinksTo({relu_out_var}); + return relu_out_var; +} + PDNode *patterns::FC::operator()(paddle::framework::ir::PDNode *x, bool with_bias) { // Create shared nodes. diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index cdd6413d96..558eea353d 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -434,6 +434,31 @@ struct ConvReLU : public PatternBase { PATTERN_DECL_NODE(relu_out); }; +// SEQCONV with Elementwise_Add ReLU +// op: seqconv + elementwise_add + relu +// named nodes: +// seqconv_input, seqconv_weight, +// seqconv_out, seqconv, +// elementwise_add_bias, elementwise_add_out, elementwise_add +// relu_out, relu +struct SeqConvEltAddRelu : public PatternBase { + SeqConvEltAddRelu(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "seqconv_eltadd_relu") {} + + PDNode* operator()(PDNode* seqconv_input); + + // declare operator node's name + PATTERN_DECL_NODE(seqconv); + PATTERN_DECL_NODE(eltadd); + PATTERN_DECL_NODE(relu); + // declare variable node's name + PATTERN_DECL_NODE(seqconv_weight); + PATTERN_DECL_NODE(seqconv_out); + PATTERN_DECL_NODE(eltadd_bias); + PATTERN_DECL_NODE(eltadd_out); + PATTERN_DECL_NODE(relu_out); +}; + // FC with bias // op: mul + elementwise_add // named nodes: diff --git a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc new file mode 100644 index 0000000000..0a1f65d274 --- /dev/null +++ b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.cc @@ -0,0 +1,101 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
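+
+// This pass fuses the subgraph {sequence_conv -> elementwise_add -> relu}
+// into a single fusion_seqconv_eltadd_relu op. A minimal usage sketch, as an
+// illustration only, assuming the PassRegistry API exercised by the fuse-pass
+// testers in this series:
+//
+//   auto pass = PassRegistry::Instance().Get("seqconv_eltadd_relu_fuse_pass");
+//   graph = pass->Apply(std::move(graph));
+//
+// The graph must carry kParamScopeAttr: BuildFusion below creates the fused
+// op's ColMat workspace variable in that scope.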
+ +#include "paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h" +#include +#include "paddle/fluid/framework/lod_tensor.h" + +namespace paddle { +namespace framework { +namespace ir { + +int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope) { + GraphPatternDetector gpd; + auto* pattern = gpd.mutable_pattern(); + + PDNode* x = pattern->NewNode(patterns::PDNodeName(name_scope, "X")) + ->assert_is_op_input("sequence_conv") + ->assert_var_not_persistable(); + patterns::SeqConvEltAddRelu fuse_pattern(pattern, name_scope); + fuse_pattern(x); + + // Create New OpDesc + auto fuse_creator = [&](Node* seqconv, Node* input, Node* seqconv_weight, + Node* eltadd_bias, Node* relu_out) { + OpDesc op_desc; + op_desc.SetType("fusion_seqconv_eltadd_relu"); + op_desc.SetInput("X", {input->Name()}); + op_desc.SetInput("Filter", {seqconv_weight->Name()}); + op_desc.SetInput("Bias", {eltadd_bias->Name()}); + op_desc.SetAttr("contextLength", seqconv->Op()->GetAttr("contextLength")); + op_desc.SetAttr("contextStart", seqconv->Op()->GetAttr("contextStart")); + op_desc.SetAttr("contextStride", seqconv->Op()->GetAttr("contextStride")); + PADDLE_ENFORCE(graph->Has(kParamScopeAttr)); + auto* scope = graph->Get(kParamScopeAttr); + const std::string ColMat = patterns::UniqueKey("SeqConvColMat"); + op_desc.SetOutput("ColMat", {ColMat}); + op_desc.SetOutput("Out", {relu_out->Name()}); + scope->Var(ColMat)->GetMutable(); + + auto* op = graph->CreateOpNode(&op_desc); + IR_NODE_LINK_TO(input, op); + IR_NODE_LINK_TO(seqconv_weight, op); + IR_NODE_LINK_TO(eltadd_bias, op); + IR_NODE_LINK_TO(op, relu_out); + return op; + }; + + int fusion_count{0}; + + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + VLOG(4) << "handle SeqConv EltAdd Relu fuse"; + GET_IR_NODE_FROM_SUBGRAPH(seqconv, seqconv, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(seqconv_weight, seqconv_weight, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(seqconv_out, seqconv_out, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(eltadd, eltadd, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(eltadd_bias, eltadd_bias, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(eltadd_out, eltadd_out, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(relu, relu, fuse_pattern); + GET_IR_NODE_FROM_SUBGRAPH(relu_out, relu_out, fuse_pattern); + + fuse_creator(seqconv, subgraph.at(x), seqconv_weight, eltadd_bias, + relu_out); + std::unordered_set marked_nodes( + {seqconv, seqconv_out, eltadd, eltadd_out, relu}); + GraphSafeRemoveNodes(graph, marked_nodes); + ++fusion_count; + }; + + gpd(graph, handler); + + return fusion_count; +} + +std::unique_ptr SeqConvEltAddReluFusePass::ApplyImpl( + std::unique_ptr graph) const { + FusePassBase::Init(name_scope_, graph.get()); + + int fusion_count = BuildFusion(graph.get(), name_scope_, param_scope()); + AddStatis(fusion_count); + + return graph; +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(seqconv_eltadd_relu_fuse_pass, + paddle::framework::ir::SeqConvEltAddReluFusePass); diff --git a/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h new file mode 100644 index 0000000000..dac9de7193 --- /dev/null +++ b/paddle/fluid/framework/ir/seqconv_eltadd_relu_fuse_pass.h @@ -0,0 +1,38 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +class SeqConvEltAddReluFusePass : public FusePassBase { + public: + virtual ~SeqConvEltAddReluFusePass() {} + + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + + const std::string name_scope_{"seqconv_eltadd_relu_fuse"}; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h index 6f45c6bf7e..84d622fa76 100644 --- a/paddle/fluid/inference/analysis/analyzer.h +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -67,17 +67,18 @@ class Analyzer : public OrderedRegistry { // larger fusion. const std::vector all_ir_passes_{{ // Manual update the passes here. - "infer_clean_graph_pass", // - "attention_lstm_fuse_pass", // - "embedding_fc_lstm_fuse_pass", // - "fc_lstm_fuse_pass", // - "mul_lstm_fuse_pass", // - "fc_gru_fuse_pass", // - "mul_gru_fuse_pass", // - "seq_concat_fc_fuse_pass", // - "fc_fuse_pass", // - "conv_bn_fuse_pass", // - "conv_eltwiseadd_bn_fuse_pass", // + "infer_clean_graph_pass", // + "attention_lstm_fuse_pass", // + "seqconv_eltadd_relu_fuse_pass", // + "embedding_fc_lstm_fuse_pass", // + "fc_lstm_fuse_pass", // + "mul_lstm_fuse_pass", // + "fc_gru_fuse_pass", // + "mul_gru_fuse_pass", // + "seq_concat_fc_fuse_pass", // + "fc_fuse_pass", // + "conv_bn_fuse_pass", // + "conv_eltwiseadd_bn_fuse_pass", // #ifdef PADDLE_WITH_MKLDNN "conv_relu_mkldnn_fuse_pass", // #endif From 40f8456a4fe8ea3077e79e68cb157da715175bf6 Mon Sep 17 00:00:00 2001 From: tensor-tang Date: Sun, 21 Oct 2018 01:41:05 +0800 Subject: [PATCH 10/53] refine fuse pattern and attr test=develop --- paddle/fluid/framework/ir/graph_pattern_detector.cc | 13 ++++--------- paddle/fluid/framework/ir/graph_pattern_detector.h | 9 +++++++++ .../tests/api/analyzer_seq_conv1_tester.cc | 8 +++++++- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 2de9bd9a05..51fd390c4d 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -349,11 +349,6 @@ PDNode *PDNode::assert_is_op() { return this; } -// PDNode *PDNode::assert_op_attr() { -// asserts_.emplace_back([](Node *x) { return x && x->IsOp(); }); -// return this; -// } - PDNode *PDNode::assert_is_op(const std::string &op_type) { asserts_.emplace_back([op_type](Node *x) { return x && x->IsOp() && x->Op()->Type() == op_type; @@ -770,10 +765,10 @@ PDNode *patterns::SeqConvEltAddRelu::operator()( paddle::framework::ir::PDNode *seqconv_input) { // Create Operators seqconv_input->assert_is_op_input("sequence_conv", "X"); - auto *seqconv_op = - pattern->NewNode(seqconv_repr())->assert_is_op("sequence_conv"); - // ->assert_op_attr("paddingTrainable", false) - // 
->assert_op_attr("contextStride", 1) + auto *seqconv_op = pattern->NewNode(seqconv_repr()) + ->assert_is_op("sequence_conv") + ->assert_op_attr("paddingTrainable", false) + ->assert_op_attr("contextStride", 1); auto *eltadd_op = pattern->NewNode(eltadd_repr())->assert_is_op("elementwise_add"); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 640b46eef5..58a1cbf316 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -128,6 +128,15 @@ struct PDNode { const std::unordered_set& op_types, const std::string& argument, int nth); + template + PDNode* assert_op_attr(const std::string& attr_name, const T& attr) { + asserts_.emplace_back([=](Node* x) { + return x && x->IsOp() && x->Op()->HasAttr(attr_name) && + boost::get(x->Op()->GetAttr(attr_name)) == attr; + }); + return this; + } + private: PDNode(PDPattern* pattern, const std::string& name = "", Type type = Type::kVar) diff --git a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc index cb4671c437..f590ef2796 100644 --- a/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc @@ -183,7 +183,13 @@ TEST(Analyzer_seq_conv1, fuse_statis) { SetConfig(&cfg); int num_ops; auto predictor = CreatePaddlePredictor(cfg); - GetFuseStatis(predictor.get(), &num_ops); + + auto fuse_statis = GetFuseStatis(predictor.get(), &num_ops); + ASSERT_TRUE(fuse_statis.count("fc_fuse")); + ASSERT_TRUE(fuse_statis.count("seqconv_eltadd_relu_fuse")); + EXPECT_EQ(fuse_statis.at("fc_fuse"), 2); + EXPECT_EQ(fuse_statis.at("seqconv_eltadd_relu_fuse"), 6); + EXPECT_EQ(num_ops, 32); } // Compare result of NativeConfig and AnalysisConfig From 9ce343f868f9c92ba6d02622ad9cbf3c3296d014 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Mon, 10 Sep 2018 14:45:06 +0200 Subject: [PATCH 11/53] MKLDNN conv + elementwise_add fusion: initial implementation of patterns --- .../mkldnn_conv_elementwise_add_fuse_pass.cc | 174 ++++++++++++++++++ .../mkldnn_conv_elementwise_add_fuse_pass.h | 24 +++ 2 files changed, 198 insertions(+) create mode 100644 paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc create mode 100644 paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h diff --git a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc new file mode 100644 index 0000000000..52d8f5fec5 --- /dev/null +++ b/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc @@ -0,0 +1,174 @@ +#include "paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h" + +namespace paddle { +namespace framework { +namespace ir { +namespace patterns { + +struct PatternNode { + PatternNode(PDPattern* pattern, + const std::string& name, + const std::string& name_scope, + const std::string& repr, + size_t id) + : nodeName{PDNodeName(name_scope, repr, id, name)} + , node{pattern->RetrieveNode(nodeName) + { } + + std::string name() { return nodeName }; + PDNode* node() { return node }; + + private: + std::string nodeName; + PDNode* node; +}; +/* + +struct Conv : public PatternBase { + Conv(PDPattern* pattern, const std::string& name_scope) + : PatternBase{pattern, name_scope, "conv"} + , conv{pattern, "conv", name_scope_, repr_, id_} + , input{pattern, "Input", name_scope_, repr_, id_} + , filter{pattern, 
"Filter", name_scope_, repr_, id_} + , output{pattern, "Output", node_scope_, repr_ id_} + { } + + private: + PatternNode conv; + PatternNode input; + PatternNode filter; + PatternNode output; + + public: + PDNode* operator()() { + auto conv_op = pattern->NewNode(conv.name()) + ->assert_is_op("conv2d"); + + auto input_var = pattern->NewNode(input.name()) + ->AsInput() + ->assert_is_op_input(conv.name()); + + auto filter_var = pattern->NewNode(filter.name()) + ->AsInput() + ->assert_is_persistable_var() + ->assert_is_op_input(conv.name()); + + auto output_var = patterh->NewNode(output.name()) + ->AsOutput() + ->assert_is_op_output(conv.name()); + + conv_op->LinksFrom({input_var, filter_var}); + conv_op->LinksTo({output_var}; + + return output_var; + } +}; +*/ + +struct Conv : public PatternBase { + Conv(PDPattern* pattern, const std::string& name_scope) + : PatternBase{pattern, name_scope, "conv"} + { } + + std::string conv_name() { return PDNodeName(name_scope_, repr_, id_, "conv2d"); } + PDNode* conv_node() { return pattern->RetrieveNode(conv_name()); } + + std::string input_name() { return PDNodeName(name_scope, repr_, id_, "Input"); } + PDNode* input_node() { return pattern->RetrieveNode(input_name()); } + + std::string filter_name() { return PDNodeName(name_scope_, repr_, id_, "Filter"); } + PDNode* filter_node() { return pattern->RetrieveNode(filter_name()); } + + std::string output_name() { return PDNodeName(name_scope, repr_, id_, "Output"); } + PDNode* output_node() { return pattern->RetrieveNode(output_name()); } + + PDNode* operator()() { + auto conv_op = pattern->NewNode(conv_name()) + ->assert_is_op("conv2d"); + + auto input_var = pattern->NewNode(input_name()) + ->AsInput() + ->assert_is_op_input(conv_name()); + + auto filter_var = pattern->NewNode(filter_name()) + ->AsInput() + ->assert_is_persistable_var() + ->assert_is_op_input(conv_name()); + + auto output_var = patterh->NewNode(output_name()) + ->AsOutput() + ->assert_is_op_output(conv_name()); + + conv_op->LinksFrom({input_var, filter_var}); + conv_op->LinksTo({output_var}; + + return output_var; + } +}; + +struct ElementwiseAdd : public PatternBase { + Conv(PDPattern* pattern, const std::string& name_scope) + : PatternBase{pattern, name_scope, "elementwise_add"} + { } + + std::string elementwise_add_name() { return PDNodeName(name_scope_, repr_, id_, "elementwise_add"); } + PDNode* elementwise_add_node() { return pattern->RetrieveNode(elementwise_add_name()); } + + std::string x_name() { return PDNodeName(name_scope, repr_, id_, "X"); } + PDNode* x_node() { return pattern->RetrieveNode(x_name()); } + + std::string y_name() { return PDNodeName(name_scope_, repr_, id_, "Y"); } + PDNode* y_node() { return pattern->RetrieveNode(y_name()); } + + std::string out_name() { return PDNodeName(name_scope, repr_, id_, "Out"); } + PDNode* out_node() { return pattern->RetrieveNode(out_name()); } + + PDNode* operator()(PDNode* conv_output) { + auto elementwise_add_op = pattern->NewNode(conv_name()) + ->assert_is_op("elementwise_add"); + + auto x_var = pattern->NewNode(x_name()) + ->AsInput() + ->assert_is_op_input(elementwise_add_name()); + + conv_output->assert_is_op_input(elementwise_add_name(), y_name()); +// auto y_var = pattern->NewNode(y_name()) +// ->AsInput() +// ->assert_is_op_input(elementwise_add_name()); + + auto out_var = pattern->NewNode(out_name()) + ->AsOutput() + ->assert_is_op_output(elementwise_add_name()); + + conv_op->LinksFrom({x_var, conv_output}); + conv_op->LinksTo({out_var}; + + return out_var; + } +}; + + +} 
// namespace patterns + +using graph_ptr = std::unique_ptr; + +graph_ptr MKLDNNConvElementwiseAddFusePass::ApplyImpl(graph_ptr) const { + FusePassBase::Init("mkldnn_conv_elementwise_add_fuse", graph.get()); + + GraphPatternDetector gpd; + auto pattern = gpd.mutable_pattern(); + + patterns::Conv conv_pattern(pattern, name_scope_); + auto conv_output = conv_pattern(); + conv_output->AsIntermediate(); + + patterns::ElementwiseAdd elementwise_add_pattern(pattern, name_scope_); + auto elementwis_add_output = elementwise_add_pattern(conv_output); + + +} + + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h b/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h new file mode 100644 index 0000000000..3aa594ae66 --- /dev/null +++ b/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +class MKLDNNConvElementwiseAddFusePass : public FusePassBase { + public: + virtual ~FCGRUFusePass() {} + + protected: + std::unique_ptr ApplyImpl(std::unique_ptr graph) const; + + const std::string name_scope_{"mkldnn_conv_elementwise_add_fuse"}; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle From 604bad08bca2ce0903251fa5d33de57c8ab745a2 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 12 Sep 2018 01:30:15 +0200 Subject: [PATCH 12/53] MKLDNN conv + elementwise_add fusion: implementation of patterns refactored, applied to graph. UTs added --- paddle/fluid/framework/ir/CMakeLists.txt | 4 + .../conv_elementwise_add_mkldnn_fuse_pass.cc | 178 ++++++++++++++++++ ...> conv_elementwise_add_mkldnn_fuse_pass.h} | 6 +- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 81 ++++++++ .../mkldnn_conv_elementwise_add_fuse_pass.cc | 174 ----------------- 5 files changed, 266 insertions(+), 177 deletions(-) create mode 100644 paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc rename paddle/fluid/framework/ir/{mkldnn_conv_elementwise_add_fuse_pass.h => conv_elementwise_add_mkldnn_fuse_pass.h} (69%) create mode 100644 paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc delete mode 100644 paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 929a388573..0f46e16201 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -44,6 +44,9 @@ if(WITH_MKLDNN) endif() cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector ) +if(WITH_MKLDNN) + pass_library(conv_elementwise_add_mkldnn_fuse_pass inference) +endif() set(GLOB_PASS_LIB ${PASS_LIBRARY} CACHE INTERNAL "Global PASS library") @@ -57,4 +60,5 @@ cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS g cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto) if (WITH_MKLDNN) cc_test(test_conv_relu_mkldnn_fuse_pass SRCS conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) + cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) endif () diff --git
a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc new file mode 100644 index 0000000000..973cd73e48 --- /dev/null +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -0,0 +1,178 @@ +#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" + +namespace paddle { +namespace framework { +namespace ir { +namespace patterns { + +struct Pattern : public PatternBase { + Pattern(PDPattern* pattern, const std::string& name_scope) + : PatternBase{pattern, name_scope, ""} + { } + + private: + std::string name_scope() { return name_scope_; } + std::string repr() { return repr_; } + size_t id() { return id_; } + PDPattern* node_pattern() { return pattern; } + + public: + std::string node_name(std::string op_name) + { + return PDNodeName(name_scope(), repr(), id(), op_name); + } + + PDNode* retrieve_node(std::string op_name) + { + return node_pattern()->RetrieveNode(node_name(op_name)); + } + + PDNode* new_node(std::string op_name) + { + return node_pattern()->NewNode(node_name(op_name)); + } +}; + +struct Conv { + std::string conv_name() { return "conv2d"; } + std::string input_name() { return "Input"; } + std::string filter_name() { return "Filter"; } + std::string output_name() { return "Output"; } + + std::function operator()(std::shared_ptr pattern) { + return [&]() -> PDNode* { + auto conv_op = pattern->new_node(conv_name()) + ->assert_is_op("conv2d"); + + auto input_var = pattern->new_node(input_name()) + ->AsInput() + ->assert_is_op_input(conv_name()); + + auto filter_var = pattern->new_node(filter_name()) + ->AsInput() + ->assert_is_persistable_var() + ->assert_is_op_input(conv_name()); + + auto output_var = pattern->new_node(output_name()) + ->AsOutput() + ->assert_is_op_output(conv_name()); + + conv_op->LinksFrom({input_var, filter_var}); + conv_op->LinksTo({output_var}); + + return output_var; + }; + } +}; + +struct ElementwiseAdd { + std::string elementwise_add_name() { return "elementwise_add"; } + std::string x_name() { return "X"; } + std::string y_name() { return "Y"; } + std::string out_name() { return "Out"; } + + std::function operator()(std::shared_ptr pattern) { + return [&](PDNode* conv_output) -> PDNode* { + auto elementwise_add_op = pattern->new_node(elementwise_add_name()) + ->assert_is_op("elementwise_add"); + + auto y_var = pattern->new_node(y_name()) + ->AsInput() + ->assert_is_op_input(elementwise_add_name()); + + conv_output->assert_is_op_input(pattern->node_name(elementwise_add_name()), + pattern->node_name(x_name())); +// auto y_var = pattern->NewNode(y_name()) +// ->AsInput() +// ->assert_is_op_input(elementwise_add_name()); + + auto out_var = pattern->new_node(out_name()) + ->AsOutput() + ->assert_is_op_output( + pattern->node_name(elementwise_add_name())); + + elementwise_add_op->LinksFrom({y_var, conv_output}); + elementwise_add_op->LinksTo({out_var}); + + return out_var; + }; + } +}; +} // namespace patterns + +Node* node_from_subgraph(const GraphPatternDetector::subgraph_t& subgraph, + std::shared_ptr pattern, const std::string& op_name) +{ + PADDLE_ENFORCE(subgraph.count(pattern->retrieve_node(op_name)), + "Node not found for PDNode %s", pattern->node_name(op_name)); + Node* var = subgraph.at(pattern->retrieve_node(op_name)); + PADDLE_ENFORCE(var, "node %s not exists in the sub-graph"); + + return var; +} + +using graph_ptr = std::unique_ptr; + +graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { + 
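+  // Flow of this pass: build the conv2d pattern, require its Output to be
+  // the X input of an elementwise_add, then for every match emit a single
+  // conv2d with fuse_sum=true that writes straight into the add's Y tensor,
+  // and drop the now-dead intermediate nodes (see the handler below).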
FusePassBase::Init("conv_elementwise_add_mkldnn_fuse_pass", graph.get()); + + GraphPatternDetector gpd; + auto pattern = gpd.mutable_pattern(); + + auto pattern_ptr = std::make_shared(pattern, name_scope_); + + patterns::Conv conv_pattern; + auto conv_output = conv_pattern(pattern_ptr)(); + conv_output->AsIntermediate(); + + patterns::ElementwiseAdd elementwise_add_pattern; + elementwise_add_pattern(pattern_ptr)(conv_output); + + auto link_nodes_to = [](Node* a, Node* b) { + a->outputs.push_back(b); + b->inputs.push_back(a); + }; + + auto fuse_conv = [&](Graph* g, Node* conv_input, Node* conv_filter, Node* y) { + OpDesc op_desc; + op_desc.SetType("conv2d"); + + op_desc.SetInput("Input", {conv_input->Name()}); + op_desc.SetInput("Filter", {conv_filter->Name()}); + op_desc.SetOutput("Ouput", {y->Name()}); + + op_desc.SetAttr("fuse_sum", true); + + auto fused_conv_op = g->CreateOpNode(&op_desc); + + link_nodes_to(conv_input, fused_conv_op); + link_nodes_to(conv_filter, fused_conv_op); + link_nodes_to(fused_conv_op, y); + }; + + auto remove_unused_nodes = [](Graph* g, const std::unordered_set& removed_nodes) { + GraphSafeRemoveNodes(g, removed_nodes); + }; + + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { + auto elementwise_add_x = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.x_name()); + auto elementwise_add_y = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); + auto elementwise_add_out = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); + + auto conv_filter = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); + auto conv_input = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.input_name()); + auto conv_output = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.output_name()); + + fuse_conv(g, conv_input, conv_filter, elementwise_add_y); + remove_unused_nodes(g, {elementwise_add_x, conv_output, elementwise_add_out}); + }; + + gpd(graph.get(), handler); + + return graph; +} +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass, paddle::framework::ir::ConvElementwiseAddMKLDNNFusePass); diff --git a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h similarity index 69% rename from paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h rename to paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h index 3aa594ae66..26118bce4b 100644 --- a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h @@ -9,14 +9,14 @@ namespace paddle { namespace framework { namespace ir { -class MKLDNNConvElementwiseAddFusePass : public FusePassBase { +class ConvElementwiseAddMKLDNNFusePass : public FusePassBase { public: - virtual ~FCGRUFusePass() {} + virtual ~ConvElementwiseAddMKLDNNFusePass() {} protected: std::unique_ptr ApplyImpl(std::unique_ptr graph) const; - const std::string name_scope_{"mkldnn_conv_elementwise_add_fuse"}; + const std::string name_scope_{"conv_elementwise_add_mkldnn_fuse_pass"}; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc new file mode 100644 index 0000000000..62dbb1eccd --- /dev/null +++ 
b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -0,0 +1,81 @@ +#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" + +#include + +namespace paddle { +namespace framework { +namespace ir { + +void SetOp(ProgramDesc* prog, const std::string& type, + const std::vector& inputs, + const std::vector& outputs) { + auto op = prog->MutableBlock(0)->AppendOp(); + op->SetType(type); + + if (type == "conv2d") { + op->SetAttr("use_mkldnn", true); + op->SetInput("Input", {inputs[0]}); + op->SetInput("Filter", {inputs[1]}); + op->SetInput("Output", {outputs}); + } else if (type == "elementwise_add") { + op->SetInput("X", {inputs[0]}); + op->SetInput("Y", {inputs[1]}); + op->SetOutput("Out", outputs); + } +} + +ProgramDesc BuildProgramDesc() { + ProgramDesc prog; + for (auto& v : + std::vector({"a", "b", "c", "d", "weights", "f", "g"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + if (v == "weights" || v == "bias") { + var->SetPersistable(true); + } + } + + SetOp(&prog, "OP0", {"a"}, {"b"}); + SetOp(&prog, "OP1", {"c"}, {"d"}); + SetOp(&prog, "conv2d", {"d", "weights"}, {"f"}); + SetOp(&prog, "elemenwise_add", {"d", "f"}, {"g"}); + + return prog; +} + +TEST(ConvElementwiseAddMKLDNNFusePass, basic) { + auto prog = BuildProgramDesc(); + std::unique_ptr graph(new ir::Graph(prog)); + auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + int original_nodes_num = graph->Nodes().size(); + graph = pass->Apply(std::move(graph)); + int current_nodes_num = graph->Nodes().size(); + + EXPECT_EQ(original_nodes_num - 2, current_nodes_num); + // Assert conv_relu op in newly generated graph + int conv_elementwise_add_count = 0; + + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == "conv2d") { + if (node->Op()->HasAttr("use_mkldnn")) { + bool use_mkldnn = boost::get(node->Op()->GetAttr("use_mkldnn")); + if (use_mkldnn) { + // TODO tpatejko: it is commented because convolution does not support this attribute + if (true/*node->Op()->HasAttr("fuse_sum")*/) { +// bool fuse_sum = boost::get(node->Op()->GetAttr("fuse_sum")); + if (true /*fuse_sum*/) { + ++conv_elementwise_add_count; + } + } + } + } + } + } + EXPECT_EQ(conv_elementwise_add_count, 1); +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +USE_PASS(conv_elementwise_add_mkldnn_fuse_pass); diff --git a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc deleted file mode 100644 index 52d8f5fec5..0000000000 --- a/paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.cc +++ /dev/null @@ -1,174 +0,0 @@ -#include "paddle/fluid/framework/ir/mkldnn_conv_elementwise_add_fuse_pass.h" - -namespace paddle { -namespace framework { -namespace ir { -namespace patterns { - -struct PatternNode { - PatternNode(PDPattern* pattern, - const std::string& name, - const std::string& name_scope, - const std::string& repr, - size_t id) - : nodeName{PDNodeName(name_scope, repr, id, name)} - , node{pattern->RetrieveNode(nodeName) - { } - - std::string name() { return nodeName }; - PDNode* node() { return node }; - - private: - std::string nodeName; - PDNode* node; -}; -/* - -struct Conv : public PatternBase { - Conv(PDPattern* pattern, const std::string& name_scope) - : PatternBase{pattern, name_scope, "conv"} - , conv{pattern, "conv", name_scope_, repr_, id_} - , input{pattern, "Input", name_scope_, repr_, id_} - , 
filter{pattern, "Filter", name_scope_, repr_, id_} - , output{pattern, "Output", node_scope_, repr_ id_} - { } - - private: - PatternNode conv; - PatternNode input; - PatternNode filter; - PatternNode output; - - public: - PDNode* operator()() { - auto conv_op = pattern->NewNode(conv.name()) - ->assert_is_op("conv2d"); - - auto input_var = pattern->NewNode(input.name()) - ->AsInput() - ->assert_is_op_input(conv.name()); - - auto filter_var = pattern->NewNode(filter.name()) - ->AsInput() - ->assert_is_persistable_var() - ->assert_is_op_input(conv.name()); - - auto output_var = patterh->NewNode(output.name()) - ->AsOutput() - ->assert_is_op_output(conv.name()); - - conv_op->LinksFrom({input_var, filter_var}); - conv_op->LinksTo({output_var}; - - return output_var; - } -}; -*/ - -struct Conv : public PatternBase { - Conv(PDPattern* pattern, const std::string& name_scope) - : PatternBase{pattern, name_scope, "conv"} - { } - - std::string conv_name() { return PDNodeName(name_scope_, repr_, id_, "conv2d"); } - PDNode* conv_node() { return pattern->RetrieveNode(conv_name()); } - - std::string input_name() { return PDNodeName(name_scope, repr_, id_, "Input"); } - PDNode* input_node() { return pattern->RetrieveNode(input_name()); } - - std::string filter_name() { return PDNodeName(name_scope_, repr_, id_, "Filter"); } - PDNode* filter_node() { return pattern->RetrieveNode(filter_name()); } - - std::string output_name() { return PDNodeName(name_scope, repr_, id_, "Output"); } - PDNode* output_node() { return pattern->RetrieveNode(output_name()); } - - PDNode* operator()() { - auto conv_op = pattern->NewNode(conv_name()) - ->assert_is_op("conv2d"); - - auto input_var = pattern->NewNode(input_name()) - ->AsInput() - ->assert_is_op_input(conv_name()); - - auto filter_var = pattern->NewNode(filter_name()) - ->AsInput() - ->assert_is_persistable_var() - ->assert_is_op_input(conv_name()); - - auto output_var = patterh->NewNode(output_name()) - ->AsOutput() - ->assert_is_op_output(conv_name()); - - conv_op->LinksFrom({input_var, filter_var}); - conv_op->LinksTo({output_var}; - - return output_var; - } -}; - -struct ElementwiseAdd : public PatternBase { - Conv(PDPattern* pattern, const std::string& name_scope) - : PatternBase{pattern, name_scope, "elementwise_add"} - { } - - std::string elementwise_add_name() { return PDNodeName(name_scope_, repr_, id_, "elementwise_add"); } - PDNode* elementwise_add_node() { return pattern->RetrieveNode(elementwise_add_name()); } - - std::string x_name() { return PDNodeName(name_scope, repr_, id_, "X"); } - PDNode* x_node() { return pattern->RetrieveNode(x_name()); } - - std::string y_name() { return PDNodeName(name_scope_, repr_, id_, "Y"); } - PDNode* y_node() { return pattern->RetrieveNode(y_name()); } - - std::string out_name() { return PDNodeName(name_scope, repr_, id_, "Out"); } - PDNode* out_node() { return pattern->RetrieveNode(out_name()); } - - PDNode* operator()(PDNode* conv_output) { - auto elementwise_add_op = pattern->NewNode(conv_name()) - ->assert_is_op("elementwise_add"); - - auto x_var = pattern->NewNode(x_name()) - ->AsInput() - ->assert_is_op_input(elementwise_add_name()); - - conv_output->assert_is_op_input(elementwise_add_name(), y_name()); -// auto y_var = pattern->NewNode(y_name()) -// ->AsInput() -// ->assert_is_op_input(elementwise_add_name()); - - auto out_var = pattern->NewNode(out_name()) - ->AsOutput() - ->assert_is_op_output(elementwise_add_name()); - - conv_op->LinksFrom({x_var, conv_output}); - conv_op->LinksTo({out_var}; - - return 
out_var; - } -}; - - -} // namespace patterns - -using graph_ptr = std::unique_ptr; - -graph_ptr MKLDNNConvElementwiseAddFusePass::ApplyImpl(graph_ptr) const { - FusePassBase::Init("mkldnn_conv_elementwise_add_fuse", graph.get()); - - GraphPatternDetector gpd; - auto pattern = gpd.mutable_pattern(); - - patterns::Conv conv_pattern(pattern, name_scope_); - auto conv_output = conv_pattern(); - conv_output->AsIntermediate(); - - patterns::ElementwiseAdd elementwise_add_pattern(pattern, name_scope_); - auto elementwis_add_output = elementwise_add_pattern(conv_output); - - -} - - -} // namespace ir -} // namespace framework -} // namespace paddle From 16eaaf3fbeac13be018272e70e8b17b3c57a00cf Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 12 Sep 2018 07:24:20 +0200 Subject: [PATCH 13/53] MKLDNN conv + elementwise_add fusion: added one more UT, found and corrected bugs in pass --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 41 +++---- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 111 ++++++++++++++---- 2 files changed, 104 insertions(+), 48 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 973cd73e48..111e08d4fc 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -45,17 +45,13 @@ struct Conv { ->assert_is_op("conv2d"); auto input_var = pattern->new_node(input_name()) - ->AsInput() - ->assert_is_op_input(conv_name()); + ->assert_is_op_input(conv_name(), input_name()); auto filter_var = pattern->new_node(filter_name()) - ->AsInput() - ->assert_is_persistable_var() - ->assert_is_op_input(conv_name()); + ->assert_is_op_input(conv_name(), filter_name()); auto output_var = pattern->new_node(output_name()) - ->AsOutput() - ->assert_is_op_output(conv_name()); + ->assert_is_op_output(conv_name(), output_name()); conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); @@ -77,19 +73,13 @@ struct ElementwiseAdd { ->assert_is_op("elementwise_add"); auto y_var = pattern->new_node(y_name()) - ->AsInput() - ->assert_is_op_input(elementwise_add_name()); + ->assert_is_op_input(elementwise_add_name(), y_name()); - conv_output->assert_is_op_input(pattern->node_name(elementwise_add_name()), - pattern->node_name(x_name())); -// auto y_var = pattern->NewNode(y_name()) -// ->AsInput() -// ->assert_is_op_input(elementwise_add_name()); + conv_output->assert_is_op_input(elementwise_add_name(), x_name()); auto out_var = pattern->new_node(out_name()) ->AsOutput() - ->assert_is_op_output( - pattern->node_name(elementwise_add_name())); + ->assert_is_op_output(elementwise_add_name(), out_name()); elementwise_add_op->LinksFrom({y_var, conv_output}); elementwise_add_op->LinksTo({out_var}); @@ -118,16 +108,16 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { GraphPatternDetector gpd; auto pattern = gpd.mutable_pattern(); - auto pattern_ptr = std::make_shared(pattern, name_scope_); patterns::Conv conv_pattern; auto conv_output = conv_pattern(pattern_ptr)(); - conv_output->AsIntermediate(); patterns::ElementwiseAdd elementwise_add_pattern; elementwise_add_pattern(pattern_ptr)(conv_output); + conv_output->AsIntermediate(); + auto link_nodes_to = [](Node* a, Node* b) { a->outputs.push_back(b); b->inputs.push_back(a); @@ -139,7 +129,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { op_desc.SetInput("Input", 
{conv_input->Name()}); op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetOutput("Ouput", {y->Name()}); + op_desc.SetOutput("Output", {y->Name()}); op_desc.SetAttr("fuse_sum", true); @@ -155,16 +145,17 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { }; auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { - auto elementwise_add_x = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.x_name()); - auto elementwise_add_y = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); - auto elementwise_add_out = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); - - auto conv_filter = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); + auto conv_op = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.conv_name()); auto conv_input = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.input_name()); + auto conv_filter = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); auto conv_output = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.output_name()); + auto elementwise_add_op = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.elementwise_add_name()); + auto elementwise_add_y = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); + auto elementwise_add_out = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); + fuse_conv(g, conv_input, conv_filter, elementwise_add_y); - remove_unused_nodes(g, {elementwise_add_x, conv_output, elementwise_add_out}); + remove_unused_nodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); }; gpd(graph.get(), handler); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 62dbb1eccd..ffecf35de2 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -16,7 +16,7 @@ void SetOp(ProgramDesc* prog, const std::string& type, op->SetAttr("use_mkldnn", true); op->SetInput("Input", {inputs[0]}); op->SetInput("Filter", {inputs[1]}); - op->SetInput("Output", {outputs}); + op->SetOutput("Output", outputs); } else if (type == "elementwise_add") { op->SetInput("X", {inputs[0]}); op->SetInput("Y", {inputs[1]}); @@ -24,54 +24,119 @@ void SetOp(ProgramDesc* prog, const std::string& type, } } -ProgramDesc BuildProgramDesc() { - ProgramDesc prog; - for (auto& v : - std::vector({"a", "b", "c", "d", "weights", "f", "g"})) { - auto* var = prog.MutableBlock(0)->Var(v); - var->SetType(proto::VarType::LOD_TENSOR); - if (v == "weights" || v == "bias") { - var->SetPersistable(true); +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddWithOps) { + auto build_program_desc = [&]() -> ProgramDesc { + ProgramDesc prog; + for (auto& v : + std::vector({"a", "b", "weights", "c", "d", "e", "f", "g"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + if (v == "weights" || v == "bias") { + var->SetPersistable(true); + } } - } - SetOp(&prog, "OP0", {"a"}, {"b"}); - SetOp(&prog, "OP1", {"c"}, {"d"}); - SetOp(&prog, "conv2d", {"d", "weights"}, {"f"}); - SetOp(&prog, "elemenwise_add", {"d", "f"}, {"g"}); + SetOp(&prog, "OP0", {"a"}, {"b"}); + SetOp(&prog, "OP1", {"c"}, {"d"}); + SetOp(&prog, "conv2d", {"b", "weights"}, {"e"}); + SetOp(&prog, 
"elementwise_add", {"e", "d"}, {"f"}); + SetOp(&prog, "OP3", {"f"}, {"g"}); + + return prog; + }; - return prog; + auto prog = build_program_desc(); + std::unique_ptr graph(new ir::Graph(prog)); + auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + int original_nodes_num = graph->Nodes().size(); + graph = pass->Apply(std::move(graph)); + int current_nodes_num = graph->Nodes().size(); + + EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); + // Assert conv_relu op in newly generated graph + int conv_count = 0; + int elementwise_add_count = 0; + + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == "conv2d") { + ++conv_count; + } + if (node->IsOp() && node->Op()->Type() == "elementwise_add") { + ++elementwise_add_count; + } + /* + if (node->Op()->HasAttr("use_mkldnn")) { + bool use_mkldnn = boost::get(node->Op()->GetAttr("use_mkldnn")); + if (use_mkldnn) { + if (node->Op()->HasAttr("fuse_sum")) { +// bool fuse_sum = boost::get(node->Op()->GetAttr("fuse_sum")); + if (fuse_sum) { + ++conv_elementwise_add_count; + } + } + } + } + } + */ + } + EXPECT_EQ(conv_count, 1); + EXPECT_EQ(elementwise_add_count, 0); } -TEST(ConvElementwiseAddMKLDNNFusePass, basic) { - auto prog = BuildProgramDesc(); +TEST(ConvElementwiseAddMKLDNNFusePass, OnlyConvolutionElementwiseAdd) { + auto build_program_desc = [&]() -> ProgramDesc { + ProgramDesc prog; + for (auto& v : + std::vector({"a", "b", "weights"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + if (v == "weights" || v == "bias") { + var->SetPersistable(true); + } + } + + SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); + SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); + + return prog; + }; + + auto prog = build_program_desc(); std::unique_ptr graph(new ir::Graph(prog)); auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); - EXPECT_EQ(original_nodes_num - 2, current_nodes_num); + EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); // Assert conv_relu op in newly generated graph - int conv_elementwise_add_count = 0; + int conv_count = 0; + int elementwise_add_count = 0; for (auto* node : graph->Nodes()) { if (node->IsOp() && node->Op()->Type() == "conv2d") { + ++conv_count; + } + if (node->IsOp() && node->Op()->Type() == "elementwise_add") { + ++elementwise_add_count; + } + /* if (node->Op()->HasAttr("use_mkldnn")) { bool use_mkldnn = boost::get(node->Op()->GetAttr("use_mkldnn")); if (use_mkldnn) { - // TODO tpatejko: it is commented because convolution does not support this attribute - if (true/*node->Op()->HasAttr("fuse_sum")*/) { + if (node->Op()->HasAttr("fuse_sum")) { // bool fuse_sum = boost::get(node->Op()->GetAttr("fuse_sum")); - if (true /*fuse_sum*/) { + if (fuse_sum) { ++conv_elementwise_add_count; } } } } } + */ } - EXPECT_EQ(conv_elementwise_add_count, 1); + EXPECT_EQ(conv_count, 1); + EXPECT_EQ(elementwise_add_count, 0); } } // namespace ir From 38b7b34b1c04442ab4f81612ce0bd9d99d341192 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 13 Sep 2018 05:50:48 +0200 Subject: [PATCH 14/53] MKLDNN conv + elementwise_add fusion: added reachability tests, inputs and outputs in graph nodes are transformed --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 33 +++- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 162 ++++++++++++++---- 2 files changed, 151 insertions(+), 
44 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 111e08d4fc..76ea58120b 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -1,4 +1,5 @@ #include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/graph_traits.h" namespace paddle { namespace framework { @@ -90,7 +91,7 @@ struct ElementwiseAdd { }; } // namespace patterns -Node* node_from_subgraph(const GraphPatternDetector::subgraph_t& subgraph, +Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, std::shared_ptr pattern, const std::string& op_name) { PADDLE_ENFORCE(subgraph.count(pattern->retrieve_node(op_name)), @@ -103,6 +104,20 @@ Node* node_from_subgraph(const GraphPatternDetector::subgraph_t& subgraph, using graph_ptr = std::unique_ptr; +void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { + for (auto& node : GraphTraits::DFS(*graph)) { + std::vector to_remove; + auto same = std::find_if(std::begin(node.inputs), + std::end(node.inputs), + [from](Node* n) { return n == from; }); + + if (same != std::end(node.inputs)) { + node.inputs.push_back(to); + to->outputs.push_back(&node); + } + } +} + graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { FusePassBase::Init("conv_elementwise_add_mkldnn_fuse_pass", graph.get()); @@ -145,16 +160,18 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { }; auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { - auto conv_op = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.conv_name()); - auto conv_input = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.input_name()); - auto conv_filter = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); - auto conv_output = node_from_subgraph(subgraph, pattern_ptr, conv_pattern.output_name()); + auto conv_op = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.conv_name()); + auto conv_input = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.input_name()); + auto conv_filter = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); + auto conv_output = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.output_name()); - auto elementwise_add_op = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.elementwise_add_name()); - auto elementwise_add_y = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); - auto elementwise_add_out = node_from_subgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); + auto elementwise_add_op = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.elementwise_add_name()); + auto elementwise_add_y = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); + auto elementwise_add_out = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); fuse_conv(g, conv_input, conv_filter, elementwise_add_y); + CorrectGraphEdges(g, elementwise_add_out, elementwise_add_y); + remove_unused_nodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); }; diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index ffecf35de2..e60a916b1d 100644 --- 
a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -1,5 +1,7 @@ #include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/graph_traits.h" +#include #include namespace paddle { @@ -21,37 +23,96 @@ void SetOp(ProgramDesc* prog, const std::string& type, op->SetInput("X", {inputs[0]}); op->SetInput("Y", {inputs[1]}); op->SetOutput("Out", outputs); + } else if (type == "relu" || type == "sigmoid") { + op->SetInput("X", {inputs[0]}); + op->SetOutput("Out", outputs); } } -TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddWithOps) { +struct IsReachable { + using func = std::function; + + auto operator()(const std::unique_ptr& graph) -> func { + auto find_node = [](const std::unique_ptr& graph, const std::string& name) -> Node* { + for (auto& node : GraphTraits::DFS(*graph)) { + if (name == node.Name()) { + return &node; + } + } + + return nullptr; + }; + + return [&](std::string from, const std::string to) -> bool { + if (from == to) + return true; + + std::map visited; + + for (auto& node : GraphTraits::DFS(*graph)) { + visited[node.Name()] = false; + } + + visited[from] = true; + + std::list queue; + queue.push_back(from); + + while(!queue.empty()) { + auto cur = find_node(graph, queue.front()); + queue.pop_front(); + + if (cur == nullptr) + return false; + + for (auto n : cur->outputs) { + if (n->Name() == to) + return true; + + if (!visited[n->Name()]) { + visited[n->Name()] = true; + queue.push_back(n->Name()); + } + } + } + return false; + }; + } +}; + +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; for (auto& v : - std::vector({"a", "b", "weights", "c", "d", "e", "f", "g"})) { + std::vector({"a", "b", "weights", "c", "d", "e"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); - if (v == "weights" || v == "bias") { + if (v == "weights") { var->SetPersistable(true); } } - SetOp(&prog, "OP0", {"a"}, {"b"}); - SetOp(&prog, "OP1", {"c"}, {"d"}); - SetOp(&prog, "conv2d", {"b", "weights"}, {"e"}); - SetOp(&prog, "elementwise_add", {"e", "d"}, {"f"}); - SetOp(&prog, "OP3", {"f"}, {"g"}); + SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); + SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); + SetOp(&prog, "relu", {"d"}, {"e"}); return prog; }; auto prog = build_program_desc(); std::unique_ptr graph(new ir::Graph(prog)); + + IsReachable is_reachable; + + EXPECT_TRUE(is_reachable(graph)("a", "relu")); + auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); - + + EXPECT_TRUE(is_reachable(graph)("a", "relu")); + EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; @@ -64,26 +125,12 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddWithOps) { if (node->IsOp() && node->Op()->Type() == "elementwise_add") { ++elementwise_add_count; } - /* - if (node->Op()->HasAttr("use_mkldnn")) { - bool use_mkldnn = boost::get(node->Op()->GetAttr("use_mkldnn")); - if (use_mkldnn) { - if (node->Op()->HasAttr("fuse_sum")) { -// bool fuse_sum = boost::get(node->Op()->GetAttr("fuse_sum")); - if (fuse_sum) { - ++conv_elementwise_add_count; - } - } - } - } - } - */ 
} EXPECT_EQ(conv_count, 1); EXPECT_EQ(elementwise_add_count, 0); } -TEST(ConvElementwiseAddMKLDNNFusePass, OnlyConvolutionElementwiseAdd) { +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; for (auto& v : @@ -103,10 +150,16 @@ TEST(ConvElementwiseAddMKLDNNFusePass, OnlyConvolutionElementwiseAdd) { auto prog = build_program_desc(); std::unique_ptr graph(new ir::Graph(prog)); + + IsReachable is_reachable; + EXPECT_TRUE(is_reachable(graph)("a", "d")); + auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); + + EXPECT_FALSE(is_reachable(graph)("a", "d")); EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); // Assert conv_relu op in newly generated graph @@ -120,20 +173,57 @@ TEST(ConvElementwiseAddMKLDNNFusePass, OnlyConvolutionElementwiseAdd) { if (node->IsOp() && node->Op()->Type() == "elementwise_add") { ++elementwise_add_count; } - /* - if (node->Op()->HasAttr("use_mkldnn")) { - bool use_mkldnn = boost::get(node->Op()->GetAttr("use_mkldnn")); - if (use_mkldnn) { - if (node->Op()->HasAttr("fuse_sum")) { -// bool fuse_sum = boost::get(node->Op()->GetAttr("fuse_sum")); - if (fuse_sum) { - ++conv_elementwise_add_count; - } - } - } + } + EXPECT_EQ(conv_count, 1); + EXPECT_EQ(elementwise_add_count, 0); +} + +TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { + auto build_program_desc = [&]() -> ProgramDesc { + ProgramDesc prog; + for (auto& v : + std::vector({"a", "b", "weights", "c", "d", "e", "f"})) { + auto* var = prog.MutableBlock(0)->Var(v); + var->SetType(proto::VarType::LOD_TENSOR); + if (v.find("weights")) { + var->SetPersistable(true); } } - */ + + SetOp(&prog, "sigmoid", {"a"}, {"b"}); + SetOp(&prog, "conv2d", {"b", "weights"}, {"c"}); + SetOp(&prog, "elementwise_add", {"c", "d"}, {"e"}); + SetOp(&prog, "relu", {"e"}, {"f"}); + + return prog; + }; + + auto prog = build_program_desc(); + std::unique_ptr graph(new ir::Graph(prog)); + + IsReachable is_reachable; + + EXPECT_TRUE(is_reachable(graph)("a", "f")); + + auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + int original_nodes_num = graph->Nodes().size(); + graph = pass->Apply(std::move(graph)); + int current_nodes_num = graph->Nodes().size(); + + EXPECT_TRUE(is_reachable(graph)("a", "f")); + + EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); + // Assert conv_relu op in newly generated graph + int conv_count = 0; + int elementwise_add_count = 0; + + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == "conv2d") { + ++conv_count; + } + if (node->IsOp() && node->Op()->Type() == "elementwise_add") { + ++elementwise_add_count; + } } EXPECT_EQ(conv_count, 1); EXPECT_EQ(elementwise_add_count, 0); From 441d3a47268f91c235c4fd01886ee2b4f67d0125 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 13 Sep 2018 08:37:19 +0200 Subject: [PATCH 15/53] MKLDNN conv + elementwise_add: added some refactoring in the pass --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 93 ++++++++++--------- 1 file changed, 48 insertions(+), 45 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 76ea58120b..f7b76ab08a 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ 
b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -18,41 +18,41 @@ struct Pattern : public PatternBase { PDPattern* node_pattern() { return pattern; } public: - std::string node_name(std::string op_name) - { + std::string node_name(std::string op_name) { return PDNodeName(name_scope(), repr(), id(), op_name); } - PDNode* retrieve_node(std::string op_name) - { + PDNode* retrieve_node(std::string op_name) { return node_pattern()->RetrieveNode(node_name(op_name)); } - PDNode* new_node(std::string op_name) - { + PDNode* new_node(std::string op_name) { return node_pattern()->NewNode(node_name(op_name)); } }; struct Conv { - std::string conv_name() { return "conv2d"; } + std::string op_name() { return "conv2d"; } std::string input_name() { return "Input"; } std::string filter_name() { return "Filter"; } std::string output_name() { return "Output"; } std::function operator()(std::shared_ptr pattern) { return [&]() -> PDNode* { - auto conv_op = pattern->new_node(conv_name()) + auto conv_op = pattern->new_node(op_name()) ->assert_is_op("conv2d"); auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(conv_name(), input_name()); + ->assert_is_op_input(op_name(), + input_name()); auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(conv_name(), filter_name()); + ->assert_is_op_input(op_name(), + filter_name()); auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(conv_name(), output_name()); + ->assert_is_op_output(op_name(), + output_name()); conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); @@ -63,24 +63,27 @@ struct Conv { }; struct ElementwiseAdd { - std::string elementwise_add_name() { return "elementwise_add"; } + std::string op_name() { return "elementwise_add"; } std::string x_name() { return "X"; } std::string y_name() { return "Y"; } std::string out_name() { return "Out"; } std::function operator()(std::shared_ptr pattern) { return [&](PDNode* conv_output) -> PDNode* { - auto elementwise_add_op = pattern->new_node(elementwise_add_name()) + auto elementwise_add_op = pattern->new_node(op_name()) ->assert_is_op("elementwise_add"); auto y_var = pattern->new_node(y_name()) - ->assert_is_op_input(elementwise_add_name(), y_name()); + ->assert_is_op_input(op_name(), + y_name()); - conv_output->assert_is_op_input(elementwise_add_name(), x_name()); + conv_output->assert_is_op_input(op_name(), + x_name()); auto out_var = pattern->new_node(out_name()) ->AsOutput() - ->assert_is_op_output(elementwise_add_name(), out_name()); + ->assert_is_op_output(op_name(), + out_name()); elementwise_add_op->LinksFrom({y_var, conv_output}); elementwise_add_op->LinksTo({out_var}); @@ -89,11 +92,10 @@ struct ElementwiseAdd { }; } }; -} // namespace patterns Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, - std::shared_ptr pattern, const std::string& op_name) -{ + std::shared_ptr pattern, + const std::string& op_name) { PADDLE_ENFORCE(subgraph.count(pattern->retrieve_node(op_name)), "Node not found for PDNode %s", pattern->node_name(op_name)); Node* var = subgraph.at(pattern->retrieve_node(op_name)); @@ -102,7 +104,10 @@ Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, return var; } -using graph_ptr = std::unique_ptr; +void LinkNodes(Node* from, Node* to) { + from->outputs.push_back(to); + to->inputs.push_back(from); +} void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { for (auto& node : GraphTraits::DFS(*graph)) { @@ -112,11 +117,12 @@ void 
CorrectGraphEdges(Graph* graph, Node* from, Node* to) { [from](Node* n) { return n == from; }); if (same != std::end(node.inputs)) { - node.inputs.push_back(to); - to->outputs.push_back(&node); + LinkNodes(to, &node); } } } +} // namespace patterns +using graph_ptr = std::unique_ptr; graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { FusePassBase::Init("conv_elementwise_add_mkldnn_fuse_pass", graph.get()); @@ -133,11 +139,6 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto link_nodes_to = [](Node* a, Node* b) { - a->outputs.push_back(b); - b->inputs.push_back(a); - }; - auto fuse_conv = [&](Graph* g, Node* conv_input, Node* conv_filter, Node* y) { OpDesc op_desc; op_desc.SetType("conv2d"); @@ -150,29 +151,31 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { auto fused_conv_op = g->CreateOpNode(&op_desc); - link_nodes_to(conv_input, fused_conv_op); - link_nodes_to(conv_filter, fused_conv_op); - link_nodes_to(fused_conv_op, y); - }; - - auto remove_unused_nodes = [](Graph* g, const std::unordered_set& removed_nodes) { - GraphSafeRemoveNodes(g, removed_nodes); + patterns::LinkNodes(conv_input, fused_conv_op); + patterns::LinkNodes(conv_filter, fused_conv_op); + patterns::LinkNodes(fused_conv_op, y); }; auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { - auto conv_op = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.conv_name()); - auto conv_input = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.input_name()); - auto conv_filter = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.filter_name()); - auto conv_output = GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.output_name()); - - auto elementwise_add_op = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.elementwise_add_name()); - auto elementwise_add_y = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.y_name()); - auto elementwise_add_out = GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); + auto conv_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + conv_pattern.op_name()); + auto conv_input = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + conv_pattern.input_name()); + auto conv_filter = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + conv_pattern.filter_name()); + auto conv_output = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + conv_pattern.output_name()); + + auto elementwise_add_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + elementwise_add_pattern.op_name()); + auto elementwise_add_y = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + elementwise_add_pattern.y_name()); + auto elementwise_add_out = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + elementwise_add_pattern.out_name()); fuse_conv(g, conv_input, conv_filter, elementwise_add_y); - CorrectGraphEdges(g, elementwise_add_out, elementwise_add_y); - - remove_unused_nodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); + patterns::CorrectGraphEdges(g, elementwise_add_out, elementwise_add_y); + patterns::GraphSafeRemoveNodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); }; gpd(graph.get(), handler); From 42f569fdfde9aec91970723c1d77c969b4fa200d Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 14 Sep 2018 02:31:10 +0200 Subject: [PATCH 16/53] MKLDNN conv + elementwise_add fusion: use_mkldnn attribute added --- 
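Note: "use_mkldnn" routes the fused conv2d to the MKLDNN kernel, and "fuse_eltwise" tells that kernel to accumulate the elementwise_add operand into its own output buffer rather than into freshly allocated memory. For orientation, a minimal sketch of the kernel-side branch these attributes enable follows. It is distilled from the conv_mkldnn_op.cc changes in patches 17, 19 and 23 below, with template arguments such as <T> and <bool> restored where this listing drops them, and with the residual input under the name it only receives in patch 23 ("ResidualData"); it is a reading aid under those assumptions, not the verbatim kernel code.

    // Sketch: inside ConvMKLDNNOpKernel<T>::Compute(const framework::ExecutionContext& ctx),
    // after the convolution handler has been set up. When the fusion pass has
    // set fuse_eltwise, the convolution writes its result on top of the
    // residual tensor, which realizes conv2d + elementwise_add in one primitive.
    bool fuse_eltwise = ctx.Attr<bool>("fuse_eltwise");
    T* output_data = nullptr;
    if (fuse_eltwise) {
      auto* residual = ctx.Input<Tensor>("ResidualData");
      // Let the output tensor alias the residual buffer, then take a typed
      // pointer into it; the conv primitive accumulates into that memory.
      output->ShareDataWith(*residual);
      output_data = output->mutable_data<T>(ctx.GetPlace());
    } else {
      output_data =
          output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
    }

Because the fused conv2d then produces its result in place of the elementwise_add output, the pass must also rewire every consumer of the removed elementwise_add output to read from the convolution output instead; that is the job of the CorrectGraphEdges helper introduced in the preceding patches.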
.../conv_elementwise_add_mkldnn_fuse_pass.cc | 37 ++++++++++--------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index f7b76ab08a..0e37bf9634 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -39,25 +39,25 @@ struct Conv { std::function operator()(std::shared_ptr pattern) { return [&]() -> PDNode* { - auto conv_op = pattern->new_node(op_name()) - ->assert_is_op("conv2d"); + auto conv_op = pattern->new_node(op_name()) + ->assert_is_op("conv2d"); - auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(op_name(), - input_name()); - - auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(op_name(), - filter_name()); + auto input_var = pattern->new_node(input_name()) + ->assert_is_op_input(op_name(), + input_name()); + + auto filter_var = pattern->new_node(filter_name()) + ->assert_is_op_input(op_name(), + filter_name()); - auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(op_name(), - output_name()); + auto output_var = pattern->new_node(output_name()) + ->assert_is_op_output(op_name(), + output_name()); - conv_op->LinksFrom({input_var, filter_var}); - conv_op->LinksTo({output_var}); + conv_op->LinksFrom({input_var, filter_var}); + conv_op->LinksTo({output_var}); - return output_var; + return output_var; }; } }; @@ -139,7 +139,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto fuse_conv = [&](Graph* g, Node* conv_input, Node* conv_filter, Node* y) { + auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, Node* y) { OpDesc op_desc; op_desc.SetType("conv2d"); @@ -147,7 +147,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { op_desc.SetInput("Input", {conv_input->Name()}); op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetAttr("fuse_sum", true); + op_desc.SetAttr("use_mkldnn", true); + op_desc.SetAttr("fuse_eltwise", true); auto fused_conv_op = g->CreateOpNode(&op_desc); @@ -175,7 +176,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { fuse_conv(g, conv_input, conv_filter, elementwise_add_y); patterns::CorrectGraphEdges(g, elementwise_add_out, elementwise_add_y); - patterns::GraphSafeRemoveNodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); + GraphSafeRemoveNodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); }; gpd(graph.get(), handler); From 56528531eadd5f0004b6ddc05b906d8260a1b08b Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Mon, 17 Sep 2018 03:37:48 +0200 Subject: [PATCH 17/53] MKLDNN conv + elementwise_add fusion: initial work on passing eltwise data to conv primitive --- paddle/fluid/operators/conv_mkldnn_op.cc | 16 +++++++++++++++- paddle/fluid/operators/conv_op.cc | 3 +++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index eae6596828..d9666c1ced 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -386,8 +386,22 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto user_weights_memory_p = handler.AcquireWeightsMemory( user_weights_md, to_void_cast(filter_data)); - T* output_data = + + T* output_data
= nullptr; + + if (fuse_eltwise) { + auto eltwise_param = ctx.Input("EltwiseParameter"); + auto eltwise_param_data = eltwise_param->data(); + + PADDLE_ENFORCE(eltwise_param_data != nullptr, "Provide data if you want MKLDNN conv+elementwise_add fusion"); + PADDLE_ENFORCE_EQ(output->dims(), eltwise_param->dims(), "Output and elementwise parameter need to have the same dimension sizes"); + + output_data = const_cast(eltwise_param_data); + } else { + output_data = output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + } + // create reorder primitive if the input format is not the preferred one auto src_memory_p = handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 8f84bf71a7..efb8c62737 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -132,6 +132,9 @@ void Conv2DOpMaker::Make() { "(Tensor) The output tensor of convolution operator. " "The format of output tensor is also NCHW.") .Reuse("Input"); + AddInput("EltwiseParameter", + "(Tensor) Tensor to which convolution output will be added." + "Used on with fuse_eltwise fusion."); AddAttr>("strides", "(vector default:{1, 1}), the " "strides(h_stride, w_stride) of " From 07a62ddc08aaaa80f4fe934d9dc8b40870970018 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Mon, 17 Sep 2018 04:41:26 +0200 Subject: [PATCH 18/53] MKLDNN conv + elementwise_add fusion: inputs in pass modified. Support for new conv parameter. UTs corrected --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 25 ++++++++++--------- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 15 ++++++----- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 0e37bf9634..f2ff0bf13b 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -73,19 +73,19 @@ struct ElementwiseAdd { auto elementwise_add_op = pattern->new_node(op_name()) ->assert_is_op("elementwise_add"); - auto y_var = pattern->new_node(y_name()) + auto x_var = pattern->new_node(x_name()) ->assert_is_op_input(op_name(), - y_name()); + x_name()); conv_output->assert_is_op_input(op_name(), - x_name()); + y_name()); auto out_var = pattern->new_node(out_name()) ->AsOutput() ->assert_is_op_output(op_name(), out_name()); - elementwise_add_op->LinksFrom({y_var, conv_output}); + elementwise_add_op->LinksFrom({x_var, conv_output}); elementwise_add_op->LinksTo({out_var}); return out_var; @@ -139,13 +139,14 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, Node* y) { + auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, Node* conv_output, Node* elementwise_add_x) { OpDesc op_desc; op_desc.SetType("conv2d"); op_desc.SetInput("Input", {conv_input->Name()}); op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetOutput("Output", {y->Name()}); + op_desc.SetInput("ElementwiseParameter", {elementwise_add_x->Name()}); + op_desc.SetOutput("Output", {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); op_desc.SetAttr("fuse_eltwise", true); @@ -154,7 +155,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { patterns::LinkNodes(conv_input, fused_conv_op); 
patterns::LinkNodes(conv_filter, fused_conv_op); - patterns::LinkNodes(fused_conv_op, y); + patterns::LinkNodes(fused_conv_op, conv_output); }; auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { @@ -169,14 +170,14 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { auto elementwise_add_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.op_name()); - auto elementwise_add_y = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - elementwise_add_pattern.y_name()); + auto elementwise_add_x = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + elementwise_add_pattern.x_name()); auto elementwise_add_out = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, elementwise_add_pattern.out_name()); - fuse_conv(g, conv_input, conv_filter, elementwise_add_y); - patterns::CorrectGraphEdges(g, elementwise_add_out, elementwise_add_y); - GraphSafeRemoveNodes(g, {conv_output, elementwise_add_out, conv_op, elementwise_add_op}); + fuse_conv(g, conv_input, conv_filter, conv_output, elementwise_add_x); + patterns::CorrectGraphEdges(g, elementwise_add_out, conv_output); + GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op}); }; gpd(graph.get(), handler); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index e60a916b1d..17de916c63 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -8,6 +8,9 @@ namespace paddle { namespace framework { namespace ir { +constexpr int nodes_removed = 3; +constexpr int nodes_added = 1; + void SetOp(ProgramDesc* prog, const std::string& type, const std::vector& inputs, const std::vector& outputs) { @@ -93,7 +96,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { } SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); + SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); SetOp(&prog, "relu", {"d"}, {"e"}); return prog; @@ -113,7 +116,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { EXPECT_TRUE(is_reachable(graph)("a", "relu")); - EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; @@ -143,7 +146,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { } SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); + SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); return prog; }; @@ -161,7 +164,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { EXPECT_FALSE(is_reachable(graph)("a", "d")); - EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; @@ -192,7 +195,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { SetOp(&prog, "sigmoid", {"a"}, {"b"}); SetOp(&prog, "conv2d", {"b", "weights"}, {"c"}); - SetOp(&prog, "elementwise_add", {"c", "d"}, {"e"}); + SetOp(&prog, "elementwise_add", {"d", "c"}, {"e"}); SetOp(&prog, "relu", {"e"}, {"f"}); return prog; @@ 
-212,7 +215,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { EXPECT_TRUE(is_reachable(graph)("a", "f")); - EXPECT_EQ(original_nodes_num - 4 + 1, current_nodes_num); + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; From 41f3d78fdfd27b54195ef07c0b696c87168e675e Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Mon, 17 Sep 2018 10:08:05 +0200 Subject: [PATCH 19/53] MKLDNN conv + elementwise_add fusion: output and elemwise param share data in conv primitive. Output is properly allocated --- .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 4 +++- paddle/fluid/operators/conv_mkldnn_op.cc | 3 ++- paddle/fluid/operators/conv_op.cc | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index f2ff0bf13b..1ede53f468 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -118,6 +118,7 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { if (same != std::end(node.inputs)) { LinkNodes(to, &node); + node.Op()->SetInput("X", {to->Name()}); } } } @@ -145,7 +146,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { op_desc.SetInput("Input", {conv_input->Name()}); op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetInput("ElementwiseParameter", {elementwise_add_x->Name()}); + op_desc.SetInput("EltwiseParameter", {elementwise_add_x->Name()}); op_desc.SetOutput("Output", {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); @@ -155,6 +156,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { patterns::LinkNodes(conv_input, fused_conv_op); patterns::LinkNodes(conv_filter, fused_conv_op); + patterns::LinkNodes(elementwise_add_x, fused_conv_op); patterns::LinkNodes(fused_conv_op, conv_output); }; diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index d9666c1ced..c849caf94f 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -396,7 +396,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { PADDLE_ENFORCE(eltwise_param_data != nullptr, "Provide data if you want MKLDNN conv+elementwise_add fusion"); PADDLE_ENFORCE_EQ(output->dims(), eltwise_param->dims(), "Output and elementwise parameter need to have the same dimension sizes"); - output_data = const_cast(eltwise_param_data); + output_data = output->mutable_data(ctx.GetPlace()); + output->ShareDataWith(*eltwise_param); } else { output_data = output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index efb8c62737..99c50a5207 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -134,7 +134,8 @@ void Conv2DOpMaker::Make() { .Reuse("Input"); AddInput("EltwiseParameter", "(Tensor) Tensor to which convolution output will be added." 
- "Used on with fuse_eltwise fusion."); + "Used on with fuse_eltwise fusion.") + .AsDispensable(); AddAttr>("strides", "(vector default:{1, 1}), the " "strides(h_stride, w_stride) of " From 5996bd39e89085214da1e7bc161525c1eb4e88d5 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 18 Sep 2018 03:26:27 +0200 Subject: [PATCH 20/53] MKLDNN conv + elementwise_add fusion: graph is corrected based on actual argument name, not formal argument name --- .../ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 1ede53f468..c3454ea7a6 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -118,7 +118,18 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { if (same != std::end(node.inputs)) { LinkNodes(to, &node); - node.Op()->SetInput("X", {to->Name()}); + + auto inputs = node.Op()->Inputs(); + + std::for_each(std::begin(inputs), std::end(inputs), + [from, to](const std::pair>& i) -> void { + auto params = i.second; + + std::remove_if(std::begin(params), std::end(params), + std::bind(std::equal_to(), from->Name(), std::placeholders::_1)); + + params.push_back(to->Name()); + }); } } } From 7f5c8a95e84f530f2c41890380703c837af9331a Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 18 Sep 2018 06:16:41 +0200 Subject: [PATCH 21/53] MKLDNN conv + elementwise_add fusion: arguments are replaced for many parameters in operator --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 38 ++++++++++++++----- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index c3454ea7a6..eae55e0e26 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -109,9 +109,23 @@ void LinkNodes(Node* from, Node* to) { to->inputs.push_back(from); } +template +void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { + if (s == e) + return; + + auto it = std::find_if(s, e, f); + + if (it != e) { + r(*it); + } + + it++; + ReplaceAllOccurances(it, e, f, r); +} + void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { for (auto& node : GraphTraits::DFS(*graph)) { - std::vector to_remove; auto same = std::find_if(std::begin(node.inputs), std::end(node.inputs), [from](Node* n) { return n == from; }); @@ -121,15 +135,19 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { auto inputs = node.Op()->Inputs(); - std::for_each(std::begin(inputs), std::end(inputs), - [from, to](const std::pair>& i) -> void { - auto params = i.second; - - std::remove_if(std::begin(params), std::end(params), - std::bind(std::equal_to(), from->Name(), std::placeholders::_1)); - - params.push_back(to->Name()); - }); + using input_type = VariableNameMap::value_type; + + ReplaceAllOccurances(std::begin(inputs), std::end(inputs), + [from](const input_type& i) -> bool { + auto params = i.second; + auto pi = std::find_if(std::begin(params), std::end(params), + std::bind(std::equal_to(), + from->Name(), std::placeholders::_1)); + return pi != std::end(params); + }, + [to, &node](const input_type& i) { + node.Op()->SetInput(i.first, {to->Name()}); + }); } } } From 
27573ece03d3c764308d52ba0987fe39da5f250c Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 18 Sep 2018 14:56:58 +0200 Subject: [PATCH 22/53] MKLDNN conv + elementwise_add fusion: trailing spaces removed --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 137 ++++++++++-------- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 69 +++++---- 2 files changed, 117 insertions(+), 89 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index eae55e0e26..ac15e1b3d5 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -1,4 +1,20 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" +#include + #include "paddle/fluid/framework/ir/graph_traits.h" namespace paddle { @@ -8,15 +24,14 @@ namespace patterns { struct Pattern : public PatternBase { Pattern(PDPattern* pattern, const std::string& name_scope) - : PatternBase{pattern, name_scope, ""} - { } - - private: + : PatternBase{pattern, name_scope, ""} {} + + private: std::string name_scope() { return name_scope_; } - std::string repr() { return repr_; } + std::string repr() { return repr_; } size_t id() { return id_; } PDPattern* node_pattern() { return pattern; } - + public: std::string node_name(std::string op_name) { return PDNodeName(name_scope(), repr(), id(), op_name); @@ -37,22 +52,18 @@ struct Conv { std::string filter_name() { return "Filter"; } std::string output_name() { return "Output"; } - std::function operator()(std::shared_ptr pattern) { + std::function operator()(std::shared_ptr pattern) { return [&]() -> PDNode* { - auto conv_op = pattern->new_node(op_name()) - ->assert_is_op("conv2d"); + auto conv_op = pattern->new_node(op_name())->assert_is_op("conv2d"); auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(op_name(), - input_name()); - + ->assert_is_op_input(op_name(), input_name()); + auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(op_name(), - filter_name()); + ->assert_is_op_input(op_name(), filter_name()); auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(op_name(), - output_name()); + ->assert_is_op_output(op_name(), output_name()); conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); @@ -68,22 +79,19 @@ struct ElementwiseAdd { std::string y_name() { return "Y"; } std::string out_name() { return "Out"; } - std::function operator()(std::shared_ptr pattern) { + std::function operator()(std::shared_ptr pattern) { return [&](PDNode* conv_output) -> PDNode* { - auto elementwise_add_op = pattern->new_node(op_name()) - ->assert_is_op("elementwise_add"); + auto elementwise_add_op = + pattern->new_node(op_name())->assert_is_op("elementwise_add"); + + auto x_var = + 
pattern->new_node(x_name())->assert_is_op_input(op_name(), x_name()); - auto x_var = pattern->new_node(x_name()) - ->assert_is_op_input(op_name(), - x_name()); - - conv_output->assert_is_op_input(op_name(), - y_name()); + conv_output->assert_is_op_input(op_name(), y_name()); auto out_var = pattern->new_node(out_name()) - ->AsOutput() - ->assert_is_op_output(op_name(), - out_name()); + ->AsOutput() + ->assert_is_op_output(op_name(), out_name()); elementwise_add_op->LinksFrom({x_var, conv_output}); elementwise_add_op->LinksTo({out_var}); @@ -94,13 +102,13 @@ struct ElementwiseAdd { }; Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, - std::shared_ptr pattern, - const std::string& op_name) { + std::shared_ptr pattern, + const std::string& op_name) { PADDLE_ENFORCE(subgraph.count(pattern->retrieve_node(op_name)), "Node not found for PDNode %s", pattern->node_name(op_name)); Node* var = subgraph.at(pattern->retrieve_node(op_name)); PADDLE_ENFORCE(var, "node %s not exists in the sub-graph"); - + return var; } @@ -109,10 +117,9 @@ void LinkNodes(Node* from, Node* to) { to->inputs.push_back(from); } -template +template void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { - if (s == e) - return; + if (s == e) return; auto it = std::find_if(s, e, f); @@ -126,8 +133,7 @@ void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { for (auto& node : GraphTraits::DFS(*graph)) { - auto same = std::find_if(std::begin(node.inputs), - std::end(node.inputs), + auto same = std::find_if(std::begin(node.inputs), std::end(node.inputs), [from](Node* n) { return n == from; }); if (same != std::end(node.inputs)) { @@ -137,17 +143,19 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { using input_type = VariableNameMap::value_type; - ReplaceAllOccurances(std::begin(inputs), std::end(inputs), - [from](const input_type& i) -> bool { - auto params = i.second; - auto pi = std::find_if(std::begin(params), std::end(params), - std::bind(std::equal_to(), - from->Name(), std::placeholders::_1)); - return pi != std::end(params); - }, - [to, &node](const input_type& i) { - node.Op()->SetInput(i.first, {to->Name()}); - }); + ReplaceAllOccurances( + std::begin(inputs), std::end(inputs), + [from](const input_type& i) -> bool { + auto params = i.second; + auto pi = + std::find_if(std::begin(params), std::end(params), + std::bind(std::equal_to(), + from->Name(), std::placeholders::_1)); + return pi != std::end(params); + }, + [to, &node](const input_type& i) { + node.Op()->SetInput(i.first, {to->Name()}); + }); } } } @@ -169,7 +177,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, Node* conv_output, Node* elementwise_add_x) { + auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, + Node* conv_output, Node* elementwise_add_x) { OpDesc op_desc; op_desc.SetType("conv2d"); @@ -189,22 +198,23 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { patterns::LinkNodes(fused_conv_op, conv_output); }; - auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { auto conv_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.op_name()); + conv_pattern.op_name()); auto conv_input = patterns::GetNodeFromSubgraph(subgraph, 
pattern_ptr, - conv_pattern.input_name()); - auto conv_filter = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.filter_name()); - auto conv_output = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.output_name()); - - auto elementwise_add_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - elementwise_add_pattern.op_name()); - auto elementwise_add_x = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - elementwise_add_pattern.x_name()); - auto elementwise_add_out = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - elementwise_add_pattern.out_name()); + conv_pattern.input_name()); + auto conv_filter = patterns::GetNodeFromSubgraph( + subgraph, pattern_ptr, conv_pattern.filter_name()); + auto conv_output = patterns::GetNodeFromSubgraph( + subgraph, pattern_ptr, conv_pattern.output_name()); + + auto elementwise_add_op = patterns::GetNodeFromSubgraph( + subgraph, pattern_ptr, elementwise_add_pattern.op_name()); + auto elementwise_add_x = patterns::GetNodeFromSubgraph( + subgraph, pattern_ptr, elementwise_add_pattern.x_name()); + auto elementwise_add_out = patterns::GetNodeFromSubgraph( + subgraph, pattern_ptr, elementwise_add_pattern.out_name()); fuse_conv(g, conv_input, conv_filter, conv_output, elementwise_add_x); patterns::CorrectGraphEdges(g, elementwise_add_out, conv_output); @@ -219,4 +229,5 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { } // namespace framework } // namespace paddle -REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass, paddle::framework::ir::ConvElementwiseAddMKLDNNFusePass); +REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass, + paddle::framework::ir::ConvElementwiseAddMKLDNNFusePass); diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 17de916c63..58b1097a25 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -1,8 +1,22 @@ -#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" -#include "paddle/fluid/framework/ir/graph_traits.h" +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-#include #include +#include + +#include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h" +#include "paddle/fluid/framework/ir/graph_traits.h" namespace paddle { namespace framework { @@ -33,10 +47,11 @@ void SetOp(ProgramDesc* prog, const std::string& type, } struct IsReachable { - using func = std::function; + using func = std::function; auto operator()(const std::unique_ptr& graph) -> func { - auto find_node = [](const std::unique_ptr& graph, const std::string& name) -> Node* { + auto find_node = [](const std::unique_ptr& graph, + const std::string& name) -> Node* { for (auto& node : GraphTraits::DFS(*graph)) { if (name == node.Name()) { return &node; @@ -47,8 +62,7 @@ struct IsReachable { }; return [&](std::string from, const std::string to) -> bool { - if (from == to) - return true; + if (from == to) return true; std::map visited; @@ -61,16 +75,14 @@ struct IsReachable { std::list queue; queue.push_back(from); - while(!queue.empty()) { + while (!queue.empty()) { auto cur = find_node(graph, queue.front()); queue.pop_front(); - if (cur == nullptr) - return false; + if (cur == nullptr) return false; for (auto n : cur->outputs) { - if (n->Name() == to) - return true; + if (n->Name() == to) return true; if (!visited[n->Name()]) { visited[n->Name()] = true; @@ -87,14 +99,14 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; for (auto& v : - std::vector({"a", "b", "weights", "c", "d", "e"})) { + std::vector({"a", "b", "weights", "c", "d", "e"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v == "weights") { var->SetPersistable(true); } } - + SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); SetOp(&prog, "relu", {"d"}, {"e"}); @@ -109,14 +121,16 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { EXPECT_TRUE(is_reachable(graph)("a", "relu")); - auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + auto pass = + PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); EXPECT_TRUE(is_reachable(graph)("a", "relu")); - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, + current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; @@ -136,15 +150,14 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; - for (auto& v : - std::vector({"a", "b", "weights"})) { + for (auto& v : std::vector({"a", "b", "weights"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v == "weights" || v == "bias") { var->SetPersistable(true); } } - + SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); @@ -157,14 +170,16 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { IsReachable is_reachable; EXPECT_TRUE(is_reachable(graph)("a", "d")); - auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + auto pass = + 
PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); EXPECT_FALSE(is_reachable(graph)("a", "d")); - - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); + + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, + current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; @@ -185,14 +200,14 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; for (auto& v : - std::vector({"a", "b", "weights", "c", "d", "e", "f"})) { + std::vector({"a", "b", "weights", "c", "d", "e", "f"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v.find("weights")) { var->SetPersistable(true); } } - + SetOp(&prog, "sigmoid", {"a"}, {"b"}); SetOp(&prog, "conv2d", {"b", "weights"}, {"c"}); SetOp(&prog, "elementwise_add", {"d", "c"}, {"e"}); @@ -208,14 +223,16 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { EXPECT_TRUE(is_reachable(graph)("a", "f")); - auto pass = PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + auto pass = + PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); int original_nodes_num = graph->Nodes().size(); graph = pass->Apply(std::move(graph)); int current_nodes_num = graph->Nodes().size(); EXPECT_TRUE(is_reachable(graph)("a", "f")); - EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, + current_nodes_num); // Assert conv_relu op in newly generated graph int conv_count = 0; int elementwise_add_count = 0; From b8e54ab5cc39774e05fa902c3fe10d476bfe1308 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 18 Sep 2018 07:42:19 +0200 Subject: [PATCH 23/53] MKLDNN conv + elementwise_add fusion: parameter name changed to ResidualData --- .../ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 2 +- paddle/fluid/operators/conv_mkldnn_op.cc | 10 +++++----- paddle/fluid/operators/conv_op.cc | 5 +++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index ac15e1b3d5..9cd3c401b0 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -184,7 +184,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { op_desc.SetInput("Input", {conv_input->Name()}); op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetInput("EltwiseParameter", {elementwise_add_x->Name()}); + op_desc.SetInput("ResidualData", {elementwise_add_x->Name()}); op_desc.SetOutput("Output", {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index c849caf94f..8c9ea7c409 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -390,14 +390,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { T* output_data = nullptr; if (fuse_eltwise) { - auto eltwise_param = ctx.Input("EltwiseParameter"); - auto eltwise_param_data = eltwise_param->data(); + auto residual_param = 
ctx.Input("ResidualData"); + auto residual_param_data = residual_param->data(); - PADDLE_ENFORCE(eltwise_param_data != nullptr, "Provide data if you want MKLDNN conv+elementwise_add fusion"); - PADDLE_ENFORCE_EQ(output->dims(), eltwise_param->dims(), "Output and elementwise parameter need to have the same dimension sizes"); + PADDLE_ENFORCE(residual_param_data != nullptr, "Provide data if you want MKLDNN conv+elementwise_add fusion"); + PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(), "Output and elementwise parameter need to have the same dimension sizes"); output_data = output->mutable_data(ctx.GetPlace()); - output->ShareDataWith(*eltwise_param); + output->ShareDataWith(*residual_param); } else { output_data = output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 99c50a5207..1e913dea1b 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -132,8 +132,9 @@ void Conv2DOpMaker::Make() { "(Tensor) The output tensor of convolution operator. " "The format of output tensor is also NCHW.") .Reuse("Input"); - AddInput("EltwiseParameter", - "(Tensor) Tensor to which convolution output will be added." + AddInput("ResidualData", + "(Tensor) Tensor with residual data " + "to which convolution output will be added." "Used on with fuse_eltwise fusion.") .AsDispensable(); AddAttr>("strides", From 2a251bbf275a0bd9fb8c1b07c398bae325ff51e3 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 19 Sep 2018 01:56:13 +0200 Subject: [PATCH 24/53] MKLDNN conv + elementwise_add fusion: some refactoring: consts, function calls instead of constant values --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 51 ++++++++++--------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 9cd3c401b0..32c677be12 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -47,23 +47,24 @@ struct Pattern : public PatternBase { }; struct Conv { - std::string op_name() { return "conv2d"; } - std::string input_name() { return "Input"; } - std::string filter_name() { return "Filter"; } - std::string output_name() { return "Output"; } + std::string op_name() const { return "conv2d"; } + std::string input_name() const { return "Input"; } + std::string filter_name() const { return "Filter"; } + std::string residual_data_name() const { return "ResidualData"; } + std::string output_name() const { return "Output"; } std::function operator()(std::shared_ptr pattern) { return [&]() -> PDNode* { - auto conv_op = pattern->new_node(op_name())->assert_is_op("conv2d"); + auto conv_op = pattern->new_node(op_name())->assert_is_op(op_name()); auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(op_name(), input_name()); + ->assert_is_op_input(op_name(), input_name()); auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(op_name(), filter_name()); + ->assert_is_op_input(op_name(), filter_name()); auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(op_name(), output_name()); + ->assert_is_op_output(op_name(), output_name()); conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); @@ -74,15 +75,15 @@ struct Conv { }; struct ElementwiseAdd { - std::string op_name() { return 
"elementwise_add"; } - std::string x_name() { return "X"; } - std::string y_name() { return "Y"; } - std::string out_name() { return "Out"; } + std::string op_name() const { return "elementwise_add"; } + std::string x_name() const { return "X"; } + std::string y_name() const { return "Y"; } + std::string out_name() const { return "Out"; } std::function operator()(std::shared_ptr pattern) { return [&](PDNode* conv_output) -> PDNode* { auto elementwise_add_op = - pattern->new_node(op_name())->assert_is_op("elementwise_add"); + pattern->new_node(op_name())->assert_is_op(op_name()); auto x_var = pattern->new_node(x_name())->assert_is_op_input(op_name(), x_name()); @@ -90,8 +91,8 @@ struct ElementwiseAdd { conv_output->assert_is_op_input(op_name(), y_name()); auto out_var = pattern->new_node(out_name()) - ->AsOutput() - ->assert_is_op_output(op_name(), out_name()); + ->AsOutput() + ->assert_is_op_output(op_name(), out_name()); elementwise_add_op->LinksFrom({x_var, conv_output}); elementwise_add_op->LinksTo({out_var}); @@ -177,15 +178,17 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto fuse_conv = [](Graph* g, Node* conv_input, Node* conv_filter, - Node* conv_output, Node* elementwise_add_x) { + auto fuse_conv = [&conv_pattern](Graph* g, Node* conv_input, + Node* conv_filter, + Node* conv_output, + Node* elementwise_add_x) { OpDesc op_desc; - op_desc.SetType("conv2d"); + op_desc.SetType(conv_pattern.op_name()); - op_desc.SetInput("Input", {conv_input->Name()}); - op_desc.SetInput("Filter", {conv_filter->Name()}); - op_desc.SetInput("ResidualData", {elementwise_add_x->Name()}); - op_desc.SetOutput("Output", {conv_output->Name()}); + op_desc.SetInput(conv_pattern.input_name(), {conv_input->Name()}); + op_desc.SetInput(conv_pattern.filter_name(), {conv_filter->Name()}); + op_desc.SetInput(conv_pattern.residual_data_name(), {elementwise_add_x->Name()}); + op_desc.SetOutput(conv_pattern.output_name(), {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); op_desc.SetAttr("fuse_eltwise", true); @@ -198,8 +201,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { patterns::LinkNodes(fused_conv_op, conv_output); }; - auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, - Graph* g) { + auto handler = [&conv_pattern, &elementwise_add_pattern, pattern_ptr, fuse_conv] + (const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { auto conv_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.op_name()); auto conv_input = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, From cbe122ae2eda6443d10c10e745b1b908d0485bfc Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 19 Sep 2018 11:07:54 +0200 Subject: [PATCH 25/53] MKLDNN conv + elementwise_add fusion: correcting formatting --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 32c677be12..56a491a195 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -58,13 +58,13 @@ struct Conv { auto conv_op = pattern->new_node(op_name())->assert_is_op(op_name()); auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(op_name(), input_name()); + ->assert_is_op_input(op_name(), 
input_name()); auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(op_name(), filter_name()); + ->assert_is_op_input(op_name(), filter_name()); auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(op_name(), output_name()); + ->assert_is_op_output(op_name(), output_name()); conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); @@ -91,8 +91,8 @@ struct ElementwiseAdd { conv_output->assert_is_op_input(op_name(), y_name()); auto out_var = pattern->new_node(out_name()) - ->AsOutput() - ->assert_is_op_output(op_name(), out_name()); + ->AsOutput() + ->assert_is_op_output(op_name(), out_name()); elementwise_add_op->LinksFrom({x_var, conv_output}); elementwise_add_op->LinksTo({out_var}); @@ -179,15 +179,15 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); auto fuse_conv = [&conv_pattern](Graph* g, Node* conv_input, - Node* conv_filter, - Node* conv_output, + Node* conv_filter, Node* conv_output, Node* elementwise_add_x) { OpDesc op_desc; op_desc.SetType(conv_pattern.op_name()); op_desc.SetInput(conv_pattern.input_name(), {conv_input->Name()}); op_desc.SetInput(conv_pattern.filter_name(), {conv_filter->Name()}); - op_desc.SetInput(conv_pattern.residual_data_name(), {elementwise_add_x->Name()}); + op_desc.SetInput(conv_pattern.residual_data_name(), + {elementwise_add_x->Name()}); op_desc.SetOutput(conv_pattern.output_name(), {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); @@ -201,8 +201,9 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { patterns::LinkNodes(fused_conv_op, conv_output); }; - auto handler = [&conv_pattern, &elementwise_add_pattern, pattern_ptr, fuse_conv] - (const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { + auto handler = [&conv_pattern, &elementwise_add_pattern, pattern_ptr, + fuse_conv](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { auto conv_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.op_name()); auto conv_input = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, From bf95ac36a719af2799935215f2ccb32e86f4d2dd Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 19 Sep 2018 12:08:52 +0200 Subject: [PATCH 26/53] MKLDNN conv + elementwise_add fusion: further reformatting --- .../ir/conv_elementwise_add_mkldnn_fuse_pass.h | 14 ++++++++++++++ paddle/fluid/operators/conv_mkldnn_op.cc | 13 ++++++++----- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h index 26118bce4b..e8e407350d 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h @@ -1,3 +1,17 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #pragma once #include diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 8c9ea7c409..48f64b1144 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -303,7 +303,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { bool fuse_eltwise = ctx.Attr("fuse_eltwise"); int groups = ctx.Attr("groups"); - // TODO: add support for dilation + // TODO(tpatejko): add support for dilation PADDLE_ENFORCE( dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1, "dilation in convolution is not implemented yet"); @@ -386,21 +386,24 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto user_weights_memory_p = handler.AcquireWeightsMemory( user_weights_md, to_void_cast(filter_data)); - T* output_data = nullptr; if (fuse_eltwise) { auto residual_param = ctx.Input("ResidualData"); auto residual_param_data = residual_param->data(); - PADDLE_ENFORCE(residual_param_data != nullptr, "Provide data if you want MKLDNN conv+elementwise_add fusion"); - PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(), "Output and elementwise parameter need to have the same dimension sizes"); + PADDLE_ENFORCE( + residual_param_data != nullptr, + "Provide data if you want MKLDNN conv+elementwise_add fusion"); + PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(), + "Output and elementwise parameter need to have the " + "same dimension sizes"); output_data = output->mutable_data(ctx.GetPlace()); output->ShareDataWith(*residual_param); } else { output_data = - output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); + output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); } // create reorder primitive if the input format is not the preferred one From 347bf904127d2b17ecc3872104bbc18a8d52be18 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 20 Sep 2018 17:10:28 +0200 Subject: [PATCH 27/53] MKLDNN conv + elementwise_add fusion: bias is also handled --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 15 ++++++++++++--- ...elementwise_add_mkldnn_fuse_pass_tester.cc | 19 ++++++++++--------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 56a491a195..eca4319c41 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -49,6 +49,7 @@ struct Pattern : public PatternBase { struct Conv { std::string op_name() const { return "conv2d"; } std::string input_name() const { return "Input"; } + std::string bias_name() const { return "Bias"; } std::string filter_name() const { return "Filter"; } std::string residual_data_name() const { return "ResidualData"; } std::string output_name() const { return "Output"; } @@ -60,13 +61,16 @@ struct Conv { auto input_var = pattern->new_node(input_name()) ->assert_is_op_input(op_name(), input_name()); + auto bias_var = pattern->new_node(bias_name()) + ->assert_is_op_input(op_name(), bias_name()); + auto filter_var = pattern->new_node(filter_name()) ->assert_is_op_input(op_name(), filter_name()); auto output_var = pattern->new_node(output_name()) ->assert_is_op_output(op_name(), output_name()); - conv_op->LinksFrom({input_var, filter_var}); + conv_op->LinksFrom({input_var, bias_var, filter_var}); conv_op->LinksTo({output_var}); return output_var; @@ -178,13 +182,14 @@ graph_ptr 
ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_output->AsIntermediate(); - auto fuse_conv = [&conv_pattern](Graph* g, Node* conv_input, + auto fuse_conv = [&conv_pattern](Graph* g, Node* conv_input, Node* conv_bias, Node* conv_filter, Node* conv_output, Node* elementwise_add_x) { OpDesc op_desc; op_desc.SetType(conv_pattern.op_name()); op_desc.SetInput(conv_pattern.input_name(), {conv_input->Name()}); + op_desc.SetInput(conv_pattern.bias_name(), {conv_bias->Name()}); op_desc.SetInput(conv_pattern.filter_name(), {conv_filter->Name()}); op_desc.SetInput(conv_pattern.residual_data_name(), {elementwise_add_x->Name()}); @@ -196,6 +201,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { auto fused_conv_op = g->CreateOpNode(&op_desc); patterns::LinkNodes(conv_input, fused_conv_op); + patterns::LinkNodes(conv_bias, fused_conv_op); patterns::LinkNodes(conv_filter, fused_conv_op); patterns::LinkNodes(elementwise_add_x, fused_conv_op); patterns::LinkNodes(fused_conv_op, conv_output); @@ -208,6 +214,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { conv_pattern.op_name()); auto conv_input = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, conv_pattern.input_name()); + auto conv_bias = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, + conv_pattern.bias_name()); auto conv_filter = patterns::GetNodeFromSubgraph( subgraph, pattern_ptr, conv_pattern.filter_name()); auto conv_output = patterns::GetNodeFromSubgraph( @@ -220,7 +228,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { auto elementwise_add_out = patterns::GetNodeFromSubgraph( subgraph, pattern_ptr, elementwise_add_pattern.out_name()); - fuse_conv(g, conv_input, conv_filter, conv_output, elementwise_add_x); + fuse_conv(g, conv_input, conv_bias, conv_filter, conv_output, + elementwise_add_x); patterns::CorrectGraphEdges(g, elementwise_add_out, conv_output); GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op}); }; diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 58b1097a25..3d37398076 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -34,7 +34,8 @@ void SetOp(ProgramDesc* prog, const std::string& type, if (type == "conv2d") { op->SetAttr("use_mkldnn", true); op->SetInput("Input", {inputs[0]}); - op->SetInput("Filter", {inputs[1]}); + op->SetInput("Bias", {inputs[1]}); + op->SetInput("Filter", {inputs[2]}); op->SetOutput("Output", outputs); } else if (type == "elementwise_add") { op->SetInput("X", {inputs[0]}); @@ -98,8 +99,8 @@ struct IsReachable { TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; - for (auto& v : - std::vector({"a", "b", "weights", "c", "d", "e"})) { + for (auto& v : std::vector( + {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v == "weights") { @@ -107,7 +108,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { } } - SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); + SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); SetOp(&prog, "relu", {"d"}, {"e"}); @@ -150,7 +151,7 @@ 
TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; - for (auto& v : std::vector({"a", "b", "weights"})) { + for (auto& v : std::vector({"a", "b", "bias", "weights"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v == "weights" || v == "bias") { @@ -158,7 +159,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { } } - SetOp(&prog, "conv2d", {"a", "weights"}, {"b"}); + SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); return prog; @@ -199,8 +200,8 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { auto build_program_desc = [&]() -> ProgramDesc { ProgramDesc prog; - for (auto& v : - std::vector({"a", "b", "weights", "c", "d", "e", "f"})) { + for (auto& v : std::vector( + {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); if (v.find("weights")) { @@ -209,7 +210,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { } SetOp(&prog, "sigmoid", {"a"}, {"b"}); - SetOp(&prog, "conv2d", {"b", "weights"}, {"c"}); + SetOp(&prog, "conv2d", {"b", "bias", "weights"}, {"c"}); SetOp(&prog, "elementwise_add", {"d", "c"}, {"e"}); SetOp(&prog, "relu", {"e"}, {"f"}); From efd76614fb9446a93cd15a50c0dfafa1e62d5d29 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 26 Sep 2018 13:28:27 +0200 Subject: [PATCH 28/53] MKLDNN conv + elementwise_add fusion: implementation changed to conform with Paddle API --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 82 ++++++++----------- .../framework/ir/graph_pattern_detector.cc | 39 +++++++++ .../framework/ir/graph_pattern_detector.h | 26 ++++++ 3 files changed, 101 insertions(+), 46 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index eca4319c41..f96db7e89b 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -22,6 +22,7 @@ namespace framework { namespace ir { namespace patterns { +/* struct Pattern : public PatternBase { Pattern(PDPattern* pattern, const std::string& name_scope) : PatternBase{pattern, name_scope, ""} {} @@ -45,7 +46,8 @@ struct Pattern : public PatternBase { return node_pattern()->NewNode(node_name(op_name)); } }; - +*/ +/* struct Conv { std::string op_name() const { return "conv2d"; } std::string input_name() const { return "Input"; } @@ -105,7 +107,8 @@ struct ElementwiseAdd { }; } }; - +*/ +/* Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, std::shared_ptr pattern, const std::string& op_name) { @@ -116,6 +119,7 @@ Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, return var; } +*/ void LinkNodes(Node* from, Node* to) { from->outputs.push_back(to); @@ -172,64 +176,50 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { GraphPatternDetector gpd; auto pattern = gpd.mutable_pattern(); - auto pattern_ptr = std::make_shared(pattern, name_scope_); - patterns::Conv conv_pattern; - auto conv_output = conv_pattern(pattern_ptr)(); + patterns::Conv conv_pattern{pattern, 
"skip_connections_fusion"}; + auto conv_output = conv_pattern(); - patterns::ElementwiseAdd elementwise_add_pattern; - elementwise_add_pattern(pattern_ptr)(conv_output); + patterns::ElementwiseAdd elementwise_add_pattern{pattern, + "skip_connections_fusion"}; + elementwise_add_pattern(conv_output); conv_output->AsIntermediate(); - auto fuse_conv = [&conv_pattern](Graph* g, Node* conv_input, Node* conv_bias, - Node* conv_filter, Node* conv_output, - Node* elementwise_add_x) { + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_bias, conv_bias, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_x, elementwise_add_x, + elementwise_add_pattern); + GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, + elementwise_add_pattern); + OpDesc op_desc; - op_desc.SetType(conv_pattern.op_name()); + op_desc.SetType("conv2d"); - op_desc.SetInput(conv_pattern.input_name(), {conv_input->Name()}); - op_desc.SetInput(conv_pattern.bias_name(), {conv_bias->Name()}); - op_desc.SetInput(conv_pattern.filter_name(), {conv_filter->Name()}); - op_desc.SetInput(conv_pattern.residual_data_name(), - {elementwise_add_x->Name()}); - op_desc.SetOutput(conv_pattern.output_name(), {conv_output->Name()}); + op_desc.SetInput("Input", {conv_input->Name()}); + op_desc.SetInput("Bias", {conv_bias->Name()}); + op_desc.SetInput("Filter", {conv_filter->Name()}); + op_desc.SetInput("ResidualData", {elementwise_add_x->Name()}); + op_desc.SetOutput("Output", {conv_output->Name()}); op_desc.SetAttr("use_mkldnn", true); op_desc.SetAttr("fuse_eltwise", true); auto fused_conv_op = g->CreateOpNode(&op_desc); - patterns::LinkNodes(conv_input, fused_conv_op); - patterns::LinkNodes(conv_bias, fused_conv_op); - patterns::LinkNodes(conv_filter, fused_conv_op); - patterns::LinkNodes(elementwise_add_x, fused_conv_op); - patterns::LinkNodes(fused_conv_op, conv_output); - }; + IR_NODE_LINK_TO(conv_input, fused_conv_op); + IR_NODE_LINK_TO(conv_bias, fused_conv_op); + IR_NODE_LINK_TO(conv_filter, fused_conv_op); + IR_NODE_LINK_TO(elementwise_add_x, fused_conv_op); + IR_NODE_LINK_TO(fused_conv_op, conv_output); - auto handler = [&conv_pattern, &elementwise_add_pattern, pattern_ptr, - fuse_conv](const GraphPatternDetector::subgraph_t& subgraph, - Graph* g) { - auto conv_op = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.op_name()); - auto conv_input = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.input_name()); - auto conv_bias = patterns::GetNodeFromSubgraph(subgraph, pattern_ptr, - conv_pattern.bias_name()); - auto conv_filter = patterns::GetNodeFromSubgraph( - subgraph, pattern_ptr, conv_pattern.filter_name()); - auto conv_output = patterns::GetNodeFromSubgraph( - subgraph, pattern_ptr, conv_pattern.output_name()); - - auto elementwise_add_op = patterns::GetNodeFromSubgraph( - subgraph, pattern_ptr, elementwise_add_pattern.op_name()); - auto elementwise_add_x = patterns::GetNodeFromSubgraph( - subgraph, pattern_ptr, elementwise_add_pattern.x_name()); - auto elementwise_add_out = patterns::GetNodeFromSubgraph( - subgraph, pattern_ptr, 
elementwise_add_pattern.out_name()); - - fuse_conv(g, conv_input, conv_bias, conv_filter, conv_output, - elementwise_add_x); patterns::CorrectGraphEdges(g, elementwise_add_out, conv_output); GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op}); }; diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index f28dfe40a2..e9517a20b6 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -999,6 +999,45 @@ PDNode *patterns::ConvBias::operator()( return eltwise_out_var; } +PDNode *patterns::Conv::operator()() { + auto conv_op = pattern->NewNode(conv_op_repr())->assert_is_op("conv2d"); + + auto input_var = pattern->NewNode(conv_input_repr()) + ->assert_is_op_input("conv2d", "Input"); + + auto bias_var = + pattern->NewNode(conv_bias_repr())->assert_is_op_input("conv2d", "Bias"); + + auto filter_var = pattern->NewNode(conv_filter_repr()) + ->assert_is_op_input("conv2d", "Filter"); + + auto output_var = pattern->NewNode(conv_output_repr()) + ->assert_is_op_output("conv2d", "Output"); + + conv_op->LinksFrom({input_var, bias_var, filter_var}); + conv_op->LinksTo({output_var}); + + return output_var; +} + +PDNode *patterns::ElementwiseAdd::operator()(PDNode *conv_output) { + auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr()) + ->assert_is_op("elementwise_add"); + + auto x_var = pattern->NewNode(elementwise_add_x_repr()) + ->assert_is_op_input("elementwise_add", "X"); + + conv_output->assert_is_op_input("elementwise_add", "Y"); + + auto out_var = pattern->NewNode(elementwise_add_out_repr()) + ->AsOutput() + ->assert_is_op_output("elementwise_add", "Out"); + + elementwise_add_op->LinksFrom({x_var, conv_output}); + elementwise_add_op->LinksTo({out_var}); + + return out_var; +} } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 9dfd7046ca..e6bd57e95f 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -599,6 +599,32 @@ struct ConvBias : public PatternBase { PATTERN_DECL_NODE(eltwise_bias); PATTERN_DECL_NODE(eltwise_out); }; + +struct Conv : public PatternBase { + Conv(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "convolution") {} + + PDNode* operator()(); + + PATTERN_DECL_NODE(conv_op); + PATTERN_DECL_NODE(conv_input); + PATTERN_DECL_NODE(conv_bias); + PATTERN_DECL_NODE(conv_filter); + PATTERN_DECL_NODE(conv_residual_data); + PATTERN_DECL_NODE(conv_output); +}; + +struct ElementwiseAdd : public PatternBase { + ElementwiseAdd(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "elementwise_add") {} + + PDNode* operator()(PDNode* conv_output); + + PATTERN_DECL_NODE(elementwise_add_op); + PATTERN_DECL_NODE(elementwise_add_x); + PATTERN_DECL_NODE(elementwise_add_y); + PATTERN_DECL_NODE(elementwise_add_out); +}; } // namespace patterns // Link two ir::Nodes from each other. 
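A note for readers on the machinery adopted in the patch above: a GraphPatternDetector owns a PDPattern, the patterns::Conv and patterns::ElementwiseAdd helpers populate it, and invoking the detector with a handler calls that handler once per matched subgraph. The sketch below condenses how such a pass is driven end to end. It is illustrative only: the handler body is elided, and name_scope_ plus the commented-out registration macro are assumptions based on how other fuse passes in this tree are written, not lines taken from these patches.

// Minimal sketch of a GraphPatternDetector-driven fuse pass (illustration).
// Assumes the patterns::Conv / patterns::ElementwiseAdd helpers and the
// handler lambda shown in the diff above; all other details are elided.
std::unique_ptr<ir::Graph> ConvElementwiseAddMKLDNNFusePass::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  FusePassBase::Init(name_scope_, graph.get());

  GraphPatternDetector gpd;
  patterns::Conv conv_pattern{gpd.mutable_pattern(), name_scope_};
  auto* conv_output = conv_pattern();        // build the conv sub-pattern
  patterns::ElementwiseAdd elementwise_add_pattern{gpd.mutable_pattern(),
                                                   name_scope_};
  elementwise_add_pattern(conv_output);      // conv output feeds the add
  conv_output->AsIntermediate();             // node is consumed by the fusion

  auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                     Graph* g) {
    // Look up the matched nodes (GET_IR_NODE_FROM_SUBGRAPH), create the
    // fused conv2d op node, relink edges (IR_NODE_LINK_TO), then drop the
    // replaced ops with GraphSafeRemoveNodes -- the steps in the diff above.
  };

  gpd(graph.get(), handler);  // run matching; handler fires per occurrence
  return graph;
}

// Registration (assumed form, mirroring other ir passes):
// REGISTER_PASS(conv_elementwise_add_mkldnn_fuse_pass,
//               paddle::framework::ir::ConvElementwiseAddMKLDNNFusePass);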
From f688197182e5a38e7b850841c372fd0d4c3d0e6c Mon Sep 17 00:00:00 2001 From: Michal Gallus Date: Tue, 25 Sep 2018 11:23:47 +0200 Subject: [PATCH 29/53] MKLDNN conv + elementwise_add fusion: Fix output_data to point to the right tensor, also fix transpiler integration --- paddle/fluid/operators/conv_mkldnn_op.cc | 2 +- .../fluid/transpiler/inference_transpiler.py | 28 +++++++++++++++---- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 48f64b1144..0ea37964e7 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -399,8 +399,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { "Output and elementwise parameter need to have the " "same dimension sizes"); - output_data = output->mutable_data(ctx.GetPlace()); output->ShareDataWith(*residual_param); + output_data = output->mutable_data(ctx.GetPlace()); } else { output_data = output->mutable_data(ctx.GetPlace(), handler.GetDstMemorySize()); diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py index c402535b27..b2cdad36a6 100644 --- a/python/paddle/fluid/transpiler/inference_transpiler.py +++ b/python/paddle/fluid/transpiler/inference_transpiler.py @@ -92,7 +92,8 @@ class InferenceTranspiler(object): if current_op.type in ['conv2d']: next_op = self.block.ops[i + 1] if next_op.type == 'elementwise_add': - self._fuse_conv_eltwise(current_op, next_op) + self._fuse_conv_eltwise(i, current_op, next_op) + self.block._remove_op(i + 1) # Remove old conv self.block._remove_op(i + 1) # Remove elementwise_add i = i + 1 self._adjust_input() @@ -444,7 +445,7 @@ class InferenceTranspiler(object): outputs={"Output": out_var}, attrs=attrs) - def _fuse_conv_eltwise(self, conv_op, eltwise_op): + def _fuse_conv_eltwise(self, index, conv_op, eltwise_op): ''' fuse the conv op with elementwise_add @@ -454,9 +455,26 @@ class InferenceTranspiler(object): :type eltwise_op: Operator ''' - conv_op._set_attr("fuse_eltwise", True) - self.input_map[conv_op.output("Output")[0]] = eltwise_op.input("Y")[0] - self.input_map[eltwise_op.output("Out")[0]] = eltwise_op.input("Y")[0] + residual_var = self.block.var(eltwise_op.input("X")[0]) + out_var = self.block.var(eltwise_op.output("Out")[0]) + filter_var = self.block.var(conv_op.input("Filter")[0]) + in_var = self.block.var(conv_op.input("Input")[0]) + bias_var = self.block.var(conv_op.input("Bias")[0]) + + conv_op.set_attr("fuse_eltwise", True) + attrs = {name: conv_op.attr(name) for name in conv_op.attr_names} + + self.block._insert_op( + index, + type="conv2d", + inputs={ + "Input": in_var, + "Filter": filter_var, + "Bias": bias_var, + "ResidualData": residual_var + }, + outputs={"Output": out_var}, + attrs=attrs) def _adjust_input(self): for i in range(len(self.block.ops)): From fb7a50b230dcf7117623591a41a9198cd7bd58e7 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 26 Sep 2018 16:48:52 +0200 Subject: [PATCH 30/53] MKLDNN conv + elementwise_add fusion: removed commented code. Internal functions marked as static. 
test=develop --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 105 +----------------- 1 file changed, 3 insertions(+), 102 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index f96db7e89b..b2c0fd63d0 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -22,112 +22,13 @@ namespace framework { namespace ir { namespace patterns { -/* -struct Pattern : public PatternBase { - Pattern(PDPattern* pattern, const std::string& name_scope) - : PatternBase{pattern, name_scope, ""} {} - - private: - std::string name_scope() { return name_scope_; } - std::string repr() { return repr_; } - size_t id() { return id_; } - PDPattern* node_pattern() { return pattern; } - - public: - std::string node_name(std::string op_name) { - return PDNodeName(name_scope(), repr(), id(), op_name); - } - - PDNode* retrieve_node(std::string op_name) { - return node_pattern()->RetrieveNode(node_name(op_name)); - } - - PDNode* new_node(std::string op_name) { - return node_pattern()->NewNode(node_name(op_name)); - } -}; -*/ -/* -struct Conv { - std::string op_name() const { return "conv2d"; } - std::string input_name() const { return "Input"; } - std::string bias_name() const { return "Bias"; } - std::string filter_name() const { return "Filter"; } - std::string residual_data_name() const { return "ResidualData"; } - std::string output_name() const { return "Output"; } - - std::function operator()(std::shared_ptr pattern) { - return [&]() -> PDNode* { - auto conv_op = pattern->new_node(op_name())->assert_is_op(op_name()); - - auto input_var = pattern->new_node(input_name()) - ->assert_is_op_input(op_name(), input_name()); - - auto bias_var = pattern->new_node(bias_name()) - ->assert_is_op_input(op_name(), bias_name()); - - auto filter_var = pattern->new_node(filter_name()) - ->assert_is_op_input(op_name(), filter_name()); - - auto output_var = pattern->new_node(output_name()) - ->assert_is_op_output(op_name(), output_name()); - - conv_op->LinksFrom({input_var, bias_var, filter_var}); - conv_op->LinksTo({output_var}); - - return output_var; - }; - } -}; - -struct ElementwiseAdd { - std::string op_name() const { return "elementwise_add"; } - std::string x_name() const { return "X"; } - std::string y_name() const { return "Y"; } - std::string out_name() const { return "Out"; } - - std::function operator()(std::shared_ptr pattern) { - return [&](PDNode* conv_output) -> PDNode* { - auto elementwise_add_op = - pattern->new_node(op_name())->assert_is_op(op_name()); - - auto x_var = - pattern->new_node(x_name())->assert_is_op_input(op_name(), x_name()); - - conv_output->assert_is_op_input(op_name(), y_name()); - - auto out_var = pattern->new_node(out_name()) - ->AsOutput() - ->assert_is_op_output(op_name(), out_name()); - - elementwise_add_op->LinksFrom({x_var, conv_output}); - elementwise_add_op->LinksTo({out_var}); - - return out_var; - }; - } -}; -*/ -/* -Node* GetNodeFromSubgraph(const GraphPatternDetector::subgraph_t& subgraph, - std::shared_ptr pattern, - const std::string& op_name) { - PADDLE_ENFORCE(subgraph.count(pattern->retrieve_node(op_name)), - "Node not found for PDNode %s", pattern->node_name(op_name)); - Node* var = subgraph.at(pattern->retrieve_node(op_name)); - PADDLE_ENFORCE(var, "node %s not exists in the sub-graph"); - - return var; -} -*/ - -void LinkNodes(Node* from, Node* to) { +static void 
LinkNodes(Node* from, Node* to) { from->outputs.push_back(to); to->inputs.push_back(from); } template -void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { +static void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { if (s == e) return; auto it = std::find_if(s, e, f); @@ -140,7 +41,7 @@ void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { ReplaceAllOccurances(it, e, f, r); } -void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { +static void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { for (auto& node : GraphTraits::DFS(*graph)) { auto same = std::find_if(std::begin(node.inputs), std::end(node.inputs), [from](Node* n) { return n == from; }); From f0efc244c6e051b14ff9e48863f32088b95e9858 Mon Sep 17 00:00:00 2001 From: Michal Gallus Date: Wed, 26 Sep 2018 14:46:09 +0200 Subject: [PATCH 31/53] MKLDNN conv + elementwise_add fusion: Fix transpiler integration to predict skip connection input of eltwise_add --- .../fluid/transpiler/inference_transpiler.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py index b2cdad36a6..9a36605d38 100644 --- a/python/paddle/fluid/transpiler/inference_transpiler.py +++ b/python/paddle/fluid/transpiler/inference_transpiler.py @@ -455,11 +455,15 @@ class InferenceTranspiler(object): :type eltwise_op: Operator ''' - residual_var = self.block.var(eltwise_op.input("X")[0]) - out_var = self.block.var(eltwise_op.output("Out")[0]) - filter_var = self.block.var(conv_op.input("Filter")[0]) - in_var = self.block.var(conv_op.input("Input")[0]) - bias_var = self.block.var(conv_op.input("Bias")[0]) + eltwise_input = "X" + if eltwise_op.input("X")[0] == conv_op.output("Output")[0]: + eltwise_input = "Y" + + residual_var = self.block.vars[eltwise_op.input(eltwise_input)[0]] + out_var = self.block.vars[eltwise_op.output("Out")[0]] + filter_var = self.block.vars[conv_op.input("Filter")[0]] + in_var = self.block.vars[conv_op.input("Input")[0]] + bias_var = self.block.vars[conv_op.input("Bias")[0]] conv_op.set_attr("fuse_eltwise", True) attrs = {name: conv_op.attr(name) for name in conv_op.attr_names} From 9a335e02774164f40895b3f7bce349f835c47246 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 27 Sep 2018 10:33:22 +0200 Subject: [PATCH 32/53] MKLDNN conv + elementwise_add fusion: changed a name of a formal argument in ElementwiseAdd pattern --- paddle/fluid/framework/ir/graph_pattern_detector.cc | 6 +++--- paddle/fluid/framework/ir/graph_pattern_detector.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index e9517a20b6..f6c8609fd7 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1020,20 +1020,20 @@ PDNode *patterns::Conv::operator()() { return output_var; } -PDNode *patterns::ElementwiseAdd::operator()(PDNode *conv_output) { +PDNode *patterns::ElementwiseAdd::operator()(PDNode *y_var) { auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr()) ->assert_is_op("elementwise_add"); auto x_var = pattern->NewNode(elementwise_add_x_repr()) ->assert_is_op_input("elementwise_add", "X"); - conv_output->assert_is_op_input("elementwise_add", "Y"); + y_var->assert_is_op_input("elementwise_add", "Y"); auto out_var = pattern->NewNode(elementwise_add_out_repr()) 
->AsOutput()
                     ->assert_is_op_output("elementwise_add", "Out");

-  elementwise_add_op->LinksFrom({x_var, conv_output});
+  elementwise_add_op->LinksFrom({x_var, y_var});
   elementwise_add_op->LinksTo({out_var});

   return out_var;
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h
index e6bd57e95f..e586b7fe4e 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.h
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -618,7 +618,7 @@ struct ElementwiseAdd : public PatternBase {
   ElementwiseAdd(PDPattern* pattern, const std::string& name_scope)
       : PatternBase(pattern, name_scope, "elementwise_add") {}

-  PDNode* operator()(PDNode* conv_output);
+  PDNode* operator()(PDNode* y_var);

   PATTERN_DECL_NODE(elementwise_add_op);
   PATTERN_DECL_NODE(elementwise_add_x);

From 4be45af1cc848604e2bd335b95ecfd8255148ff9 Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Thu, 27 Sep 2018 11:12:32 +0200
Subject: [PATCH 33/53] MKLDNN conv + elementwise_add fusion: skip connection
 attribute renamed. Comments about patterns added.

test=develop
---
 .../conv_elementwise_add_mkldnn_fuse_pass.cc  |  2 +-
 .../framework/ir/graph_pattern_detector.h     | 13 +++++++++
 paddle/fluid/operators/conv_mkldnn_op.cc      | 29 ++++++++++---------
 paddle/fluid/operators/conv_op.cc             |  8 ++---
 .../fluid/transpiler/inference_transpiler.py  |  4 +--
 5 files changed, 36 insertions(+), 20 deletions(-)

diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
index b2c0fd63d0..4f1a291d16 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -111,7 +111,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
     op_desc.SetOutput("Output", {conv_output->Name()});

     op_desc.SetAttr("use_mkldnn", true);
-    op_desc.SetAttr("fuse_eltwise", true);
+    op_desc.SetAttr("fuse_residual_connection", true);

     auto fused_conv_op = g->CreateOpNode(&op_desc);

diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h
index e586b7fe4e..08fd8174ce 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.h
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -600,6 +600,15 @@ struct ConvBias : public PatternBase {
   PATTERN_DECL_NODE(eltwise_out);
 };

+// Convolution op
+// Forward pass for convolution.
+// conv_input, conv_bias and conv_filter are inputs.
+// conv_output is a result of the operator.
+// residual_data is data used by the skip connection.
+// If residual connection fusion is on, the formula is:
+// conv_output = conv_op(conv_filter, conv_input, conv_bias)
+//             + conv_residual_data
+// If the fusion is off, conv_residual_data is not added.
 struct Conv : public PatternBase {
   Conv(PDPattern* pattern, const std::string& name_scope)
       : PatternBase(pattern, name_scope, "convolution") {}
@@ -614,6 +623,10 @@ struct Conv : public PatternBase {
   PATTERN_DECL_NODE(conv_output);
 };

+// ElementwiseAdd used in residual connections.
+// y_var is used as convolution output.
+// The operator is removed when residual
+// connection fusion is on.
struct ElementwiseAdd : public PatternBase { ElementwiseAdd(PDPattern* pattern, const std::string& name_scope) : PatternBase(pattern, name_scope, "elementwise_add") {} diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 0ea37964e7..521f423fb0 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -300,7 +300,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); std::vector dilations = ctx.Attr>("dilations"); bool fuse_relu = ctx.Attr("fuse_relu"); - bool fuse_eltwise = ctx.Attr("fuse_eltwise"); + bool fuse_residual_conn = ctx.Attr("fuse_residual_connection"); int groups = ctx.Attr("groups"); // TODO(tpatejko): add support for dilation @@ -369,11 +369,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { bias_tz, platform::MKLDNNGetDataType(), memory::format::x); conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, bias_md, dst_md, strides, paddings, mkldnn_engine, - fuse_relu, fuse_eltwise); + fuse_relu, fuse_residual_conn); } else { conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, paddings, - mkldnn_engine, fuse_relu, fuse_eltwise); + mkldnn_engine, fuse_relu, fuse_residual_conn); } // Save conv_pd/src_memory/weights_memory for backward pass dev_ctx.SetBlob(key_conv_pd, conv_pd); @@ -388,7 +388,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { T* output_data = nullptr; - if (fuse_eltwise) { + if (fuse_residual_conn) { auto residual_param = ctx.Input("ResidualData"); auto residual_param_data = residual_param->data(); @@ -442,14 +442,15 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { private: mkldnn::primitive_attr CreatePostOps(bool fuse_relu, - bool fuse_eltwise) const { + bool fuse_residual_conn) const { mkldnn::primitive_attr conv_attr; mkldnn::post_ops post_operations; // Fusion with Elementwise layer relies on adding a sum post-operation with - // the scale parameter. It is assumed that when fuse_eltwise is true, the - // Output tensor contains the data coming from residual connection. The - // result of this post_op is: Output = scale * Output + Conv_Out. - if (fuse_eltwise) { + // the scale parameter. It is assumed that when fuse_residual_connection is + // true, the output tensor contains the data coming from residual + // connection. The result of this post_op is: + // Output = scale * Output + Conv_Out. + if (fuse_residual_conn) { post_operations.append_sum(1.0f); } // Fusion with ReLU layer is executed through the PostOps feature. 
Create a
@@ -470,7 +471,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                           const memory::desc& dst, const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const mkldnn::engine& engine, const bool fuse_relu,
-                          const bool fuse_eltwise) const {
+                          const bool fuse_residual_conn) const {
     memory::dims stride_dims = {strides[0], strides[1]};
     memory::dims padding_dims = {paddings[0], paddings[1]};

@@ -479,7 +480,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         dst, stride_dims, padding_dims, padding_dims,
         mkldnn::padding_kind::zero);

-    mkldnn::primitive_attr conv_attr = CreatePostOps(fuse_relu, fuse_eltwise);
+    mkldnn::primitive_attr conv_attr =
+        CreatePostOps(fuse_relu, fuse_residual_conn);

     auto p_conv_pd = new mkldnn::convolution_forward::primitive_desc(
         conv_desc, conv_attr, engine);
@@ -494,7 +496,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
                           const std::vector<int>& strides,
                           const std::vector<int>& paddings,
                           const mkldnn::engine& engine, const bool fuse_relu,
-                          const bool fuse_eltwise) const {
+                          const bool fuse_residual_conn) const {
     memory::dims stride_dims = {strides[0], strides[1]};
     memory::dims padding_dims = {paddings[0], paddings[1]};

@@ -503,7 +505,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         bias, dst, stride_dims, padding_dims, padding_dims,
         mkldnn::padding_kind::zero);

-    mkldnn::primitive_attr conv_attr = CreatePostOps(fuse_relu, fuse_eltwise);
+    mkldnn::primitive_attr conv_attr =
+        CreatePostOps(fuse_relu, fuse_residual_conn);

     auto p_conv_pd = new mkldnn::convolution_forward::primitive_desc(
         conv_desc, conv_attr, engine);
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index 1e913dea1b..8f2561fcc3 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -135,7 +135,7 @@ void Conv2DOpMaker::Make() {
   AddInput("ResidualData",
            "(Tensor) Tensor with residual data "
            "to which convolution output will be added."
-           "Used on with fuse_eltwise fusion.")
+           "Used with fuse_residual_connection fusion.")
       .AsDispensable();
   AddAttr<std::vector<int>>("strides",
                             "(vector<int> default:{1, 1}), the "
@@ -169,10 +169,10 @@ void Conv2DOpMaker::Make() {
       .SetDefault(false);
   AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
       .SetDefault(false);
-  AddAttr<bool>("fuse_eltwise",
+  AddAttr<bool>("fuse_residual_connection",
                 "(bool, default false) Only used in mkldnn kernel. Used "
-                "whenever convolution output is connected via skip connection "
-                "to a previous layer.")
+                "whenever convolution output is used as an input to a "
+                "residual connection.")
       .SetDefault(false);
   AddAttr<std::string>(
       "data_format",
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index 9a36605d38..90b1a16a5a 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -74,7 +74,7 @@ class InferenceTranspiler(object):
         '''
         Transpile the program fusing elementwise_add into conv for MKLDNN
         program. Elementwise add following convolution OP can be fused by adding
-        'fuse_eltwise' attribute to convolution OP and replacing its output
+        'fuse_residual_connection' attribute to convolution OP and replacing its output
         Tensor with second parameter of elementwise_add.
The result of fuse is: - before: @@ -465,7 +465,7 @@ class InferenceTranspiler(object): in_var = self.block.vars[conv_op.input("Input")[0]] bias_var = self.block.vars[conv_op.input("Bias")[0]] - conv_op.set_attr("fuse_eltwise", True) + conv_op.set_attr("fuse_residual_connection", True) attrs = {name: conv_op.attr(name) for name in conv_op.attr_names} self.block._insert_op( From 3e033087f1d09f402fe93f20be6330386ee67b29 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 26 Sep 2018 18:32:51 +0200 Subject: [PATCH 34/53] MKLDNN conv + elementwise_add fusion: LinkNodes function removed and macro used. test=develop --- .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 4f1a291d16..00a68d5907 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -22,11 +22,6 @@ namespace framework { namespace ir { namespace patterns { -static void LinkNodes(Node* from, Node* to) { - from->outputs.push_back(to); - to->inputs.push_back(from); -} - template static void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { if (s == e) return; @@ -47,7 +42,7 @@ static void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { [from](Node* n) { return n == from; }); if (same != std::end(node.inputs)) { - LinkNodes(to, &node); + IR_NODE_LINK_TO(to, (&node)); auto inputs = node.Op()->Inputs(); From af8c71317c93a74801131231468a499d027c715c Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 28 Sep 2018 13:08:16 +0200 Subject: [PATCH 35/53] MKLDNN conv + elementwise_add fusion: CorrectGraphEdges refactored --- .../conv_elementwise_add_mkldnn_fuse_pass.cc | 52 ++++++------------- 1 file changed, 17 insertions(+), 35 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 00a68d5907..43b8f977cf 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -20,51 +20,33 @@ namespace paddle { namespace framework { namespace ir { -namespace patterns { - -template -static void ReplaceAllOccurances(IT s, IT e, FindFunc f, ReplaceFunc r) { - if (s == e) return; - - auto it = std::find_if(s, e, f); - - if (it != e) { - r(*it); - } - - it++; - ReplaceAllOccurances(it, e, f, r); -} - -static void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { +namespace { +void CorrectGraphEdges(Graph* graph, Node* from, Node* to) { for (auto& node : GraphTraits::DFS(*graph)) { - auto same = std::find_if(std::begin(node.inputs), std::end(node.inputs), - [from](Node* n) { return n == from; }); + auto from_in_inputs = + std::find(std::begin(node.inputs), std::end(node.inputs), from); - if (same != std::end(node.inputs)) { + if (from_in_inputs != std::end(node.inputs)) { IR_NODE_LINK_TO(to, (&node)); auto inputs = node.Op()->Inputs(); using input_type = VariableNameMap::value_type; - ReplaceAllOccurances( - std::begin(inputs), std::end(inputs), - [from](const input_type& i) -> bool { - auto params = i.second; - auto pi = - std::find_if(std::begin(params), std::end(params), - std::bind(std::equal_to(), - from->Name(), std::placeholders::_1)); - return pi != std::end(params); - }, - [to, &node](const 
input_type& i) { - node.Op()->SetInput(i.first, {to->Name()}); - }); + std::for_each(std::begin(inputs), std::end(inputs), + [from, to, &node](const input_type& i) -> void { + auto param_names = i.second; + auto pi = std::find(std::begin(param_names), + std::end(param_names), from->Name()); + + if (pi != std::end(param_names)) { + node.Op()->SetInput(i.first, {to->Name()}); + } + }); } } } -} // namespace patterns +} // namespace using graph_ptr = std::unique_ptr; graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { @@ -116,7 +98,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { IR_NODE_LINK_TO(elementwise_add_x, fused_conv_op); IR_NODE_LINK_TO(fused_conv_op, conv_output); - patterns::CorrectGraphEdges(g, elementwise_add_out, conv_output); + CorrectGraphEdges(g, elementwise_add_out, conv_output); GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op}); }; From a27a8c5da8384a8d3d6a4334a412cf54ad9eec1b Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 28 Sep 2018 13:41:41 +0200 Subject: [PATCH 36/53] MKLDNN conv + elementwise_add fusion: bias in test marked as persistable --- .../ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 3d37398076..ce79a465ca 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -103,7 +103,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); - if (v == "weights") { + if (v == "weights" || v == "bias") { var->SetPersistable(true); } } From cc1c8e37c146906ed6aa492eee3193d793e2ccc9 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 28 Sep 2018 13:53:25 +0200 Subject: [PATCH 37/53] MKLDNN conv + elementwise_add fusion: attributes in new conv op copied from old op --- .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 43b8f977cf..4dd6e273bd 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -87,7 +87,10 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { op_desc.SetInput("ResidualData", {elementwise_add_x->Name()}); op_desc.SetOutput("Output", {conv_output->Name()}); - op_desc.SetAttr("use_mkldnn", true); + for (const auto& attr : conv_op->Op()->GetAttrMap()) { + op_desc.SetAttr(attr.first, attr.second); + } + op_desc.SetAttr("fuse_residual_connection", true); auto fused_conv_op = g->CreateOpNode(&op_desc); From 8fb29b2ca98164c15e6253001a5fd906ef90f792 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Fri, 28 Sep 2018 13:57:33 +0200 Subject: [PATCH 38/53] MKLDNN conv + elementwise_add fusion: new nodes marked as input or output test=develop --- paddle/fluid/framework/ir/graph_pattern_detector.cc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc 
b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index f6c8609fd7..6d524651e0 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -1003,15 +1003,19 @@ PDNode *patterns::Conv::operator()() {
   auto conv_op = pattern->NewNode(conv_op_repr())->assert_is_op("conv2d");

   auto input_var = pattern->NewNode(conv_input_repr())
+                       ->AsInput()
                        ->assert_is_op_input("conv2d", "Input");

-  auto bias_var =
-      pattern->NewNode(conv_bias_repr())->assert_is_op_input("conv2d", "Bias");
+  auto bias_var = pattern->NewNode(conv_bias_repr())
+                      ->AsInput()
+                      ->assert_is_op_input("conv2d", "Bias");

   auto filter_var = pattern->NewNode(conv_filter_repr())
+                        ->AsInput()
                         ->assert_is_op_input("conv2d", "Filter");

   auto output_var = pattern->NewNode(conv_output_repr())
+                        ->AsOutput()
                         ->assert_is_op_output("conv2d", "Output");

   conv_op->LinksFrom({input_var, bias_var, filter_var});
@@ -1025,6 +1029,7 @@ PDNode *patterns::ElementwiseAdd::operator()(PDNode *y_var) {
                       ->assert_is_op("elementwise_add");

   auto x_var = pattern->NewNode(elementwise_add_x_repr())
+                   ->AsInput()
                    ->assert_is_op_input("elementwise_add", "X");

   y_var->assert_is_op_input("elementwise_add", "Y");

From 2c43419db1d0ff5e2872126dd64711c7b24d3449 Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Tue, 9 Oct 2018 10:30:00 +0200
Subject: [PATCH 39/53] MKLDNN conv + elementwise_add fusion: comment
 explaining CorrectGraphEdges added

---
 .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
index 4dd6e273bd..0f3f1572fc 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -21,6 +21,10 @@ namespace paddle {
 namespace framework {
 namespace ir {
 namespace {
+
+// The function keeps the graph consistent by replacing
+// a node 'from' in the set of input nodes
+// of the visited node by a node 'to'.
void CorrectGraphEdges(Graph* graph, Node* from, Node* to) {
   for (auto& node : GraphTraits::DFS(*graph)) {
     auto from_in_inputs =

From a1fa20328725cc54a5aafe1035eab3b85c43ef26 Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Tue, 9 Oct 2018 10:41:26 +0200
Subject: [PATCH 40/53] MKLDNN conv + elementwise_add fusion: name of the pass
 reused with name_scope_

---
 .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 7 +++----
 .../framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h  | 2 +-
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
index 0f3f1572fc..2612a10415 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -54,16 +54,15 @@ void CorrectGraphEdges(Graph* graph, Node* from, Node* to) {
 using graph_ptr = std::unique_ptr<ir::Graph>;

 graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
-  FusePassBase::Init("conv_elementwise_add_mkldnn_fuse_pass", graph.get());
+  FusePassBase::Init(name_scope_, graph.get());

   GraphPatternDetector gpd;
   auto pattern = gpd.mutable_pattern();

-  patterns::Conv conv_pattern{pattern, "skip_connections_fusion"};
+  patterns::Conv conv_pattern{pattern, name_scope_};
   auto conv_output = conv_pattern();

-  patterns::ElementwiseAdd elementwise_add_pattern{pattern,
-                                                   "skip_connections_fusion"};
+  patterns::ElementwiseAdd elementwise_add_pattern{pattern, name_scope_};
   elementwise_add_pattern(conv_output);

   conv_output->AsIntermediate();
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h
index e8e407350d..f4a899f1ad 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h
@@ -30,7 +30,7 @@ class ConvElementwiseAddMKLDNNFusePass : public FusePassBase {
  protected:
   std::unique_ptr<ir::Graph> ApplyImpl(std::unique_ptr<ir::Graph> graph) const;

-  const std::string name_scope_{"conv_elementwise_add_mkldnn_fuse_pass"};
+  const std::string name_scope_{"residual_connections_fuse_pass"};
 };

 } // namespace ir

From b73b86836678271774790ed2d7facd1f5b1ebe5d Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Tue, 9 Oct 2018 10:51:51 +0200
Subject: [PATCH 41/53] MKLDNN conv + elementwise_add fusion: bias in tests
 made persistent.
test=develop --- .../ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index ce79a465ca..08c3b23cf3 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -204,7 +204,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { auto* var = prog.MutableBlock(0)->Var(v); var->SetType(proto::VarType::LOD_TENSOR); - if (v.find("weights")) { + if (v.find("weights") || v.find("bias")) { var->SetPersistable(true); } } From 0fe3079c4641fb1ee20b40f7f445d7e63c13c345 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 16 Oct 2018 14:56:21 +0200 Subject: [PATCH 42/53] MKLDNN conv + elementwise_add fusion: fix for order of parameters in elementwise_add in resnet50 test=develop --- .../ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc | 6 +++--- paddle/fluid/framework/ir/graph_pattern_detector.cc | 10 +++++----- paddle/fluid/framework/ir/graph_pattern_detector.h | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc index 08c3b23cf3..fd47b96c10 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc @@ -109,7 +109,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { } SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); + SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); SetOp(&prog, "relu", {"d"}, {"e"}); return prog; @@ -160,7 +160,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { } SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"c", "b"}, {"d"}); + SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); return prog; }; @@ -211,7 +211,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { SetOp(&prog, "sigmoid", {"a"}, {"b"}); SetOp(&prog, "conv2d", {"b", "bias", "weights"}, {"c"}); - SetOp(&prog, "elementwise_add", {"d", "c"}, {"e"}); + SetOp(&prog, "elementwise_add", {"c", "d"}, {"e"}); SetOp(&prog, "relu", {"e"}, {"f"}); return prog; diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 6d524651e0..786765bff7 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1024,15 +1024,15 @@ PDNode *patterns::Conv::operator()() { return output_var; } -PDNode *patterns::ElementwiseAdd::operator()(PDNode *y_var) { +PDNode *patterns::ElementwiseAdd::operator()(PDNode *x_var) { auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr()) ->assert_is_op("elementwise_add"); - auto x_var = pattern->NewNode(elementwise_add_x_repr()) - ->AsInput() - ->assert_is_op_input("elementwise_add", "X"); + x_var->assert_is_op_input("elementwise_add", "X"); - y_var->assert_is_op_input("elementwise_add", "Y"); + auto y_var = pattern->NewNode(elementwise_add_x_repr()) + ->AsInput() + ->assert_is_op_input("elementwise_add", 
"Y"); auto out_var = pattern->NewNode(elementwise_add_out_repr()) ->AsOutput() diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 08fd8174ce..8e4f4a14ab 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -631,7 +631,7 @@ struct ElementwiseAdd : public PatternBase { ElementwiseAdd(PDPattern* pattern, const std::string& name_scope) : PatternBase(pattern, name_scope, "elementwise_add") {} - PDNode* operator()(PDNode* y_var); + PDNode* operator()(PDNode* x_var); PATTERN_DECL_NODE(elementwise_add_op); PATTERN_DECL_NODE(elementwise_add_x); From 16760946978c7b58c4ec6aab90d1da2dff74f671 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Tue, 16 Oct 2018 18:27:48 +0200 Subject: [PATCH 43/53] MKLDNN conv + elementwise_add fusion: turn on residual connection pass when CAPI is used. test=develop --- paddle/fluid/inference/analysis/analyzer.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h index f13b362575..c92b8694a0 100644 --- a/paddle/fluid/inference/analysis/analyzer.h +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -80,7 +80,8 @@ class Analyzer : public OrderedRegistry { "conv_eltwiseadd_bn_fuse_pass", // #ifdef PADDLE_WITH_MKLDNN "conv_bias_mkldnn_fuse_pass", // - "conv_relu_mkldnn_fuse_pass", // + "conv_relu_mkldnn_fuse_pass", // + "conv_elementwise_add_mkldnn_fuse_pass", // #endif }}; From 7c64aa0fdc6def71ac8e7b7bb2532692eb041ede Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Wed, 17 Oct 2018 11:17:34 +0200 Subject: [PATCH 44/53] MKLDNN conv + elementwise_add fusion: _set_attr corrected in residual connection fusion test=develop --- python/paddle/fluid/transpiler/inference_transpiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py index 90b1a16a5a..5269bd94ce 100644 --- a/python/paddle/fluid/transpiler/inference_transpiler.py +++ b/python/paddle/fluid/transpiler/inference_transpiler.py @@ -465,7 +465,7 @@ class InferenceTranspiler(object): in_var = self.block.vars[conv_op.input("Input")[0]] bias_var = self.block.vars[conv_op.input("Bias")[0]] - conv_op.set_attr("fuse_residual_connection", True) + conv_op._set_attr("fuse_residual_connection", True) attrs = {name: conv_op.attr(name) for name in conv_op.attr_names} self.block._insert_op( From 415b261555de939c7620dc8bcec94107160998d0 Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Thu, 18 Oct 2018 15:51:04 +0200 Subject: [PATCH 45/53] MKLDNN conv + elementwise_add fusion: fusion options added --- .../fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc index 2612a10415..7aad9de1be 100644 --- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc @@ -81,6 +81,8 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const { GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out, elementwise_add_pattern); + if (FindFuseOption(conv_op, elementwise_add_op) != FUSE_MKLDNN) return; + OpDesc op_desc; op_desc.SetType("conv2d"); From 
4e72ab411eece7345f4ab21a142d93e2004f716e Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Fri, 19 Oct 2018 09:50:10 +0200
Subject: [PATCH 46/53] MKLDNN conv + elementwise_add fusion: fix for crash
 when bias is not present

---
 .../conv_elementwise_add_mkldnn_fuse_pass.cc  | 41 +++++++++++++++++--
 .../framework/ir/graph_pattern_detector.cc    |  6 +--
 2 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
index 7aad9de1be..10b1d636e4 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -14,6 +14,7 @@
 
 #include "paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.h"
 #include
+#include
 
 #include "paddle/fluid/framework/ir/graph_traits.h"
 
@@ -67,11 +68,32 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
 
   conv_output->AsIntermediate();
 
+  auto conv_op_has_bias = [](const Node& conv_op,
+                             const Scope& scope) -> std::pair<bool, Node*> {
+    auto bias_input_names = conv_op.Op()->Inputs();
+    auto bias_it = bias_input_names.find("Bias");
+
+    if (bias_it != std::end(bias_input_names)) {
+      bool has_bias = !bias_it->second.empty();
+
+      if (has_bias) {
+        auto conv_bias_names = bias_it->second;
+        auto conv_bias_names_it =
+            std::find_if(std::begin(conv_op.inputs), std::end(conv_op.inputs),
+                         [&conv_bias_names](Node* n) -> bool {
+                           return n->Name() == conv_bias_names[0];
+                         });
+        return std::make_pair(has_bias, *conv_bias_names_it);
+      }
+    }
+
+    return std::make_pair(false, nullptr);
+  };
+
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
     GET_IR_NODE_FROM_SUBGRAPH(conv_op, conv_op, conv_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(conv_input, conv_input, conv_pattern);
-    GET_IR_NODE_FROM_SUBGRAPH(conv_bias, conv_bias, conv_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(conv_filter, conv_filter, conv_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(conv_output, conv_output, conv_pattern);
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_op, elementwise_add_op,
@@ -81,17 +103,25 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
     GET_IR_NODE_FROM_SUBGRAPH(elementwise_add_out, elementwise_add_out,
                               elementwise_add_pattern);
 
-    if (FindFuseOption(conv_op, elementwise_add_op) != FUSE_MKLDNN) return;
+    if (FindFuseOption(*conv_op, *elementwise_add_op) != FUSE_MKLDNN) return;
 
     OpDesc op_desc;
     op_desc.SetType("conv2d");
 
     op_desc.SetInput("Input", {conv_input->Name()});
-    op_desc.SetInput("Bias", {conv_bias->Name()});
     op_desc.SetInput("Filter", {conv_filter->Name()});
     op_desc.SetInput("ResidualData", {elementwise_add_x->Name()});
     op_desc.SetOutput("Output", {conv_output->Name()});
 
+    bool has_bias;
+    Node* conv_bias;
+
+    std::tie(has_bias, conv_bias) = conv_op_has_bias(*conv_op, *param_scope());
+
+    if (has_bias) {
+      op_desc.SetInput("Bias", {conv_bias->Name()});
+    }
+
     for (const auto& attr : conv_op->Op()->GetAttrMap()) {
       op_desc.SetAttr(attr.first, attr.second);
     }
@@ -101,11 +131,14 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
     auto fused_conv_op = g->CreateOpNode(&op_desc);
 
     IR_NODE_LINK_TO(conv_input, fused_conv_op);
-    IR_NODE_LINK_TO(conv_bias, fused_conv_op);
     IR_NODE_LINK_TO(conv_filter, fused_conv_op);
     IR_NODE_LINK_TO(elementwise_add_x, fused_conv_op);
     IR_NODE_LINK_TO(fused_conv_op, conv_output);
 
+    if (has_bias) {
+      IR_NODE_LINK_TO(conv_bias, fused_conv_op);
+    }
+
     CorrectGraphEdges(g, elementwise_add_out, conv_output);
     GraphSafeRemoveNodes(g, {elementwise_add_out, conv_op, elementwise_add_op});
   };
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index 786765bff7..da83bcdf37 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -1006,10 +1006,6 @@ PDNode *patterns::Conv::operator()() {
                         ->AsInput()
                         ->assert_is_op_input("conv2d", "Input");
 
-  auto bias_var = pattern->NewNode(conv_bias_repr())
-                      ->AsInput()
-                      ->assert_is_op_input("conv2d", "Bias");
-
   auto filter_var = pattern->NewNode(conv_filter_repr())
                         ->AsInput()
                         ->assert_is_op_input("conv2d", "Filter");
@@ -1018,7 +1014,7 @@ PDNode *patterns::Conv::operator()() {
                         ->AsOutput()
                         ->assert_is_op_output("conv2d", "Output");
 
-  conv_op->LinksFrom({input_var, bias_var, filter_var});
+  conv_op->LinksFrom({input_var, /*bias_var,*/ filter_var});
   conv_op->LinksTo({output_var});
 
   return output_var;

From ce2464fd988b3817674e566b15c7c483b976eaad Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Fri, 19 Oct 2018 13:31:32 +0200
Subject: [PATCH 47/53] MKLDNN conv + elementwise_add fusion: UT for missing
 bias added. UTs refactored. Some minor changes in the pass

---
 .../conv_elementwise_add_mkldnn_fuse_pass.cc  |   5 +-
 ...elementwise_add_mkldnn_fuse_pass_tester.cc | 202 +++++++++---------
 .../framework/ir/graph_pattern_detector.cc    |   2 +-
 .../framework/ir/graph_pattern_detector.h     |   1 -
 4 files changed, 99 insertions(+), 111 deletions(-)

diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
index 10b1d636e4..8d0035ae98 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass.cc
@@ -68,8 +68,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
 
   conv_output->AsIntermediate();
 
-  auto conv_op_has_bias = [](const Node& conv_op,
-                             const Scope& scope) -> std::pair<bool, Node*> {
+  auto conv_op_has_bias = [](const Node& conv_op) -> std::pair<bool, Node*> {
     auto bias_input_names = conv_op.Op()->Inputs();
     auto bias_it = bias_input_names.find("Bias");
 
@@ -116,7 +115,7 @@ graph_ptr ConvElementwiseAddMKLDNNFusePass::ApplyImpl(graph_ptr graph) const {
     bool has_bias;
     Node* conv_bias;
 
-    std::tie(has_bias, conv_bias) = conv_op_has_bias(*conv_op, *param_scope());
+    std::tie(has_bias, conv_bias) = conv_op_has_bias(*conv_op);
 
     if (has_bias) {
       op_desc.SetInput("Bias", {conv_bias->Name()});
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
index fd47b96c10..348a3dfc5d 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_mkldnn_fuse_pass_tester.cc
@@ -22,29 +22,22 @@ namespace paddle {
 namespace framework {
 namespace ir {
 
+namespace {
 constexpr int nodes_removed = 3;
 constexpr int nodes_added = 1;
 
 void SetOp(ProgramDesc* prog, const std::string& type,
-           const std::vector<std::string>& inputs,
-           const std::vector<std::string>& outputs) {
+           const std::vector<std::pair<std::string, std::string>>& inputs,
+           const std::pair<std::string, std::string>& output) {
   auto op = prog->MutableBlock(0)->AppendOp();
   op->SetType(type);
+  op->SetAttr("use_mkldnn", true);
 
-  if (type == "conv2d") {
-    op->SetAttr("use_mkldnn", true);
-    op->SetInput("Input", {inputs[0]});
-    op->SetInput("Bias", {inputs[1]});
-    op->SetInput("Filter", {inputs[2]});
op->SetOutput("Output", outputs); - } else if (type == "elementwise_add") { - op->SetInput("X", {inputs[0]}); - op->SetInput("Y", {inputs[1]}); - op->SetOutput("Out", outputs); - } else if (type == "relu" || type == "sigmoid") { - op->SetInput("X", {inputs[0]}); - op->SetOutput("Out", outputs); + for (const auto& input : inputs) { + op->SetInput(input.first, {input.second}); } + + op->SetOutput(output.first, {output.second}); } struct IsReachable { @@ -96,30 +89,59 @@ struct IsReachable { } }; -TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { - auto build_program_desc = [&]() -> ProgramDesc { - ProgramDesc prog; - for (auto& v : std::vector( - {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { - auto* var = prog.MutableBlock(0)->Var(v); - var->SetType(proto::VarType::LOD_TENSOR); - if (v == "weights" || v == "bias") { - var->SetPersistable(true); - } +void AssertOpsCount(const std::unique_ptr& graph) { + int conv_count = 0; + int elementwise_add_count = 0; + + for (auto* node : graph->Nodes()) { + if (node->IsOp() && node->Op()->Type() == "conv2d") { + ++conv_count; + } + if (node->IsOp() && node->Op()->Type() == "elementwise_add") { + ++elementwise_add_count; } + } + EXPECT_EQ(conv_count, 1); + EXPECT_EQ(elementwise_add_count, 0); +} + +ProgramDesc BuildProgramDesc(const std::vector& transient_vars, + const std::vector& persistent_vars) { + ProgramDesc prog; - SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); - SetOp(&prog, "relu", {"d"}, {"e"}); + auto add_var_to_prog = [&prog](const std::string& var_name) -> VarDesc* { + auto var = prog.MutableBlock(0)->Var(var_name); + var->SetType(proto::VarType::LOD_TENSOR); - return prog; + return var; }; - auto prog = build_program_desc(); + for (const auto& v : transient_vars) { + add_var_to_prog(v); + } + + for (const auto& v : persistent_vars) { + auto var = add_var_to_prog(v); + var->SetPersistable(true); + } + + return prog; +} +} // namespace + +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { + auto prog = + BuildProgramDesc({"a", "b", "c", "d", "e", "f"}, {"bias", "weights"}); + + SetOp(&prog, "conv2d", + {{"Input", "a"}, {"Bias", "bias"}, {"Filter", "weights"}}, + {"Output", "b"}); + SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); + std::unique_ptr graph(new ir::Graph(prog)); IsReachable is_reachable; - EXPECT_TRUE(is_reachable(graph)("a", "relu")); auto pass = @@ -132,40 +154,45 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionWithElementwiseAddRelu) { EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); - // Assert conv_relu op in newly generated graph - int conv_count = 0; - int elementwise_add_count = 0; - for (auto* node : graph->Nodes()) { - if (node->IsOp() && node->Op()->Type() == "conv2d") { - ++conv_count; - } - if (node->IsOp() && node->Op()->Type() == "elementwise_add") { - ++elementwise_add_count; - } - } - EXPECT_EQ(conv_count, 1); - EXPECT_EQ(elementwise_add_count, 0); + AssertOpsCount(graph); } -TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { - auto build_program_desc = [&]() -> ProgramDesc { - ProgramDesc prog; - for (auto& v : std::vector({"a", "b", "bias", "weights"})) { - auto* var = prog.MutableBlock(0)->Var(v); - var->SetType(proto::VarType::LOD_TENSOR); - if (v == "weights" || v == "bias") { - var->SetPersistable(true); - } - } +TEST(ConvElementwiseAddMKLDNNFusePass, + 
ConvolutionWithElementwiseAddReluNoBias) { + auto prog = BuildProgramDesc({"a", "b", "c", "d", "e"}, {"weights"}); + SetOp(&prog, "conv2d", {{"Input", "a"}, {"Filter", "weights"}}, + {"Output", "b"}); + SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); + SetOp(&prog, "relu", {{"X", "d"}}, {"Out", "e"}); - SetOp(&prog, "conv2d", {"a", "bias", "weights"}, {"b"}); - SetOp(&prog, "elementwise_add", {"b", "c"}, {"d"}); + std::unique_ptr graph(new ir::Graph(prog)); - return prog; - }; + IsReachable is_reachable; + + EXPECT_TRUE(is_reachable(graph)("a", "relu")); + + auto pass = + PassRegistry::Instance().Get("conv_elementwise_add_mkldnn_fuse_pass"); + int original_nodes_num = graph->Nodes().size(); + graph = pass->Apply(std::move(graph)); + int current_nodes_num = graph->Nodes().size(); + + EXPECT_TRUE(is_reachable(graph)("a", "relu")); + + EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, + current_nodes_num); + + AssertOpsCount(graph); +} + +TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { + auto prog = BuildProgramDesc({"a", "b", "c", "d"}, {"bias", "weights"}); + SetOp(&prog, "conv2d", + {{"Input", "a"}, {"Bias", "bias"}, {"Filter", "weights"}}, + {"Output", "b"}); + SetOp(&prog, "elementwise_add", {{"X", "b"}, {"Y", "c"}}, {"Out", "d"}); - auto prog = build_program_desc(); std::unique_ptr graph(new ir::Graph(prog)); IsReachable is_reachable; @@ -181,43 +208,19 @@ TEST(ConvElementwiseAddMKLDNNFusePass, ConvolutionElementwiseAdd) { EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); - // Assert conv_relu op in newly generated graph - int conv_count = 0; - int elementwise_add_count = 0; - - for (auto* node : graph->Nodes()) { - if (node->IsOp() && node->Op()->Type() == "conv2d") { - ++conv_count; - } - if (node->IsOp() && node->Op()->Type() == "elementwise_add") { - ++elementwise_add_count; - } - } - EXPECT_EQ(conv_count, 1); - EXPECT_EQ(elementwise_add_count, 0); + AssertOpsCount(graph); } TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { - auto build_program_desc = [&]() -> ProgramDesc { - ProgramDesc prog; - for (auto& v : std::vector( - {"a", "b", "bias", "weights", "c", "d", "e", "f"})) { - auto* var = prog.MutableBlock(0)->Var(v); - var->SetType(proto::VarType::LOD_TENSOR); - if (v.find("weights") || v.find("bias")) { - var->SetPersistable(true); - } - } - - SetOp(&prog, "sigmoid", {"a"}, {"b"}); - SetOp(&prog, "conv2d", {"b", "bias", "weights"}, {"c"}); - SetOp(&prog, "elementwise_add", {"c", "d"}, {"e"}); - SetOp(&prog, "relu", {"e"}, {"f"}); - - return prog; - }; + auto prog = + BuildProgramDesc({"a", "b", "c", "d", "e", "f"}, {"bias", "weights"}); + SetOp(&prog, "sigmoid", {{"X", "a"}}, {"Out", "b"}); + SetOp(&prog, "conv2d", + {{"Input", "b"}, {"Bias", "bias"}, {"Filter", "weights"}}, + {"Output", "c"}); + SetOp(&prog, "elementwise_add", {{"X", "c"}, {"Y", "d"}}, {"Out", "e"}); + SetOp(&prog, "relu", {{"X", "e"}}, {"Out", "f"}); - auto prog = build_program_desc(); std::unique_ptr graph(new ir::Graph(prog)); IsReachable is_reachable; @@ -234,20 +237,7 @@ TEST(ConvElementwiseAddMKLDNNFusePass, SigmoidConvolutionAddElementwiseRelu) { EXPECT_EQ(original_nodes_num - nodes_removed + nodes_added, current_nodes_num); - // Assert conv_relu op in newly generated graph - int conv_count = 0; - int elementwise_add_count = 0; - - for (auto* node : graph->Nodes()) { - if (node->IsOp() && node->Op()->Type() == "conv2d") { - ++conv_count; - } - if (node->IsOp() && node->Op()->Type() == 
"elementwise_add") { - ++elementwise_add_count; - } - } - EXPECT_EQ(conv_count, 1); - EXPECT_EQ(elementwise_add_count, 0); + AssertOpsCount(graph); } } // namespace ir diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index da83bcdf37..8447525193 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1014,7 +1014,7 @@ PDNode *patterns::Conv::operator()() { ->AsOutput() ->assert_is_op_output("conv2d", "Output"); - conv_op->LinksFrom({input_var, /*bias_var,*/ filter_var}); + conv_op->LinksFrom({input_var, filter_var}); conv_op->LinksTo({output_var}); return output_var; diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 8e4f4a14ab..63189d95d7 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -617,7 +617,6 @@ struct Conv : public PatternBase { PATTERN_DECL_NODE(conv_op); PATTERN_DECL_NODE(conv_input); - PATTERN_DECL_NODE(conv_bias); PATTERN_DECL_NODE(conv_filter); PATTERN_DECL_NODE(conv_residual_data); PATTERN_DECL_NODE(conv_output); From 56936b9e25451167699b1f1073373da144c43ed5 Mon Sep 17 00:00:00 2001 From: Dang Qingqing Date: Sat, 20 Oct 2018 19:26:57 +0800 Subject: [PATCH 48/53] Refine doc for generate_proposals_op. test=develop --- .../detection/generate_proposals_op.cc | 60 +++++++++++-------- 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index e9f966b577..a69d9c9a52 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -453,33 +453,45 @@ class GenerateProposalsKernel : public framework::OpKernel { class GenerateProposalsOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Scores", "The scores of anchors should be foreground."); - AddInput("BboxDeltas", "bbox_deltas."); - AddInput("ImInfo", "Information for image reshape."); - AddInput("Anchors", "All anchors."); - AddInput("Variances", " variances"); - - AddOutput("RpnRois", "Anchors."); - AddOutput("RpnRoiProbs", "Anchors."); - AddAttr("pre_nms_topN", "pre_nms_topN"); - AddAttr("post_nms_topN", "post_nms_topN"); - AddAttr("nms_thresh", "nms_thres"); - AddAttr("min_size", "min size"); + AddInput("Scores", + "(Tensor) The scores from conv is in shape (N, A, H, W), " + "N is batch size, A is number of anchors, " + "H and W are height and width of the feature map"); + AddInput("BboxDeltas", + "(Tensor) Bounding box deltas from conv is in " + "shape (N, 4*A, H, W)."); + AddInput("ImInfo", + "(Tensor) Information for image reshape is in shape (N, 3), " + "in format (height, width, scale)"); + AddInput("Anchors", + "(Tensor) Bounding box anchors from anchor_generator_op " + "is in shape (A, H, W, 4)."); + AddInput("Variances", + "(Tensor) Bounding box variances with same shape as `Anchors`."); + + AddOutput("RpnRois", + "(LoDTensor), Output proposals with shape (rois_num, 4)."); + AddOutput("RpnRoiProbs", + "(LoDTensor) Scores of proposals with shape (rois_num, 1)."); + AddAttr("pre_nms_topN", + "Number of top scoring RPN proposals to keep before " + "applying NMS."); + AddAttr("post_nms_topN", + "Number of top scoring RPN proposals to keep after " + "applying NMS"); + AddAttr("nms_thresh", "NMS 
threshold used on RPN proposals."); + AddAttr("min_size", + "Proposal height and width both need to be greater " + "than this min_size."); AddAttr("eta", "The parameter for adaptive NMS."); AddComment(R"DOC( -Generate Proposals OP +This operator Generate bounding box proposals for Faster RCNN. +The propoasls are generated for a list of images based on image +score 'Scores', bounding box regression result 'BboxDeltas' as +well as predefined bounding box shapes 'anchors'. Greedy +non-maximum suppression is applied to generate the final bounding +boxes. -This operator proposes rois according to each box with their probability to be a foreground object and -the box can be calculated by anchors. Bbox_details and scores are the output of RPN. Final proposals -could be used to train detection net. - -Scores is the probability for each box to be an object. In format of (N, A, H, W) where N is batch size, A is number -of anchors, H and W are height and width of the feature map. -BboxDeltas is the differece between predicted box location and anchor location. In format of (N, 4*A, H, W) - -For generating proposals, this operator transposes and resizes scores and bbox_deltas in size of (H*W*A, 1) and (H*W*A, 4) and - calculate box locations as proposals candidates. Then clip boxes to image and remove predicted boxes with small area. -Finally, apply nms to get final proposals as output. )DOC"); } }; From aa35aaa1ab71216c9902820c649a6a8db41303cc Mon Sep 17 00:00:00 2001 From: Tomasz Patejko Date: Sat, 20 Oct 2018 23:16:56 +0200 Subject: [PATCH 49/53] MKLDNN conv + elementwise_add fusion: fixing formatting test=develop --- paddle/fluid/inference/analysis/analyzer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h index c92b8694a0..165e12194b 100644 --- a/paddle/fluid/inference/analysis/analyzer.h +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -79,7 +79,7 @@ class Analyzer : public OrderedRegistry { "conv_bn_fuse_pass", // "conv_eltwiseadd_bn_fuse_pass", // #ifdef PADDLE_WITH_MKLDNN - "conv_bias_mkldnn_fuse_pass", // + "conv_bias_mkldnn_fuse_pass", // "conv_relu_mkldnn_fuse_pass", // "conv_elementwise_add_mkldnn_fuse_pass", // #endif From 82d2903b635ab724ea3af7e235d77e5d44e09d1a Mon Sep 17 00:00:00 2001 From: chengduozh Date: Sun, 21 Oct 2018 17:07:12 +0800 Subject: [PATCH 50/53] Fix fast ParallelExe bug test=develop --- paddle/fluid/framework/details/var_handle.h | 2 ++ paddle/fluid/framework/parallel_executor.cc | 6 ++++++ paddle/fluid/platform/device_context.cc | 10 ++++++++++ paddle/fluid/platform/device_context.h | 3 +++ 4 files changed, 21 insertions(+) diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h index d8c2bc40b9..a1f458c660 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -49,6 +49,8 @@ struct VarHandleBase { void AddOutput(OpHandleBase* out, ir::Node* node) { if (pending_ops_.find(out) == pending_ops_.end()) { + PADDLE_ENFORCE(out != nullptr, "The output of %s should not be nullptr", + this->Node()->Name()); pending_ops_.insert(out); node_->outputs.push_back(node); } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index e8adabd265..093108cb54 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -299,6 +299,12 @@ void 
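Usage note: the inputs and attributes documented above surface in Python as `fluid.layers.generate_proposals` (its helper is also touched by patch 52 below). A minimal sketch of wiring the op after an RPN head; the keyword names, tensor shapes, and threshold values here are illustrative assumptions, not text taken from these patches:

    import paddle.fluid as fluid

    # Hypothetical RPN head outputs for one 16x16 feature map with A=4
    # anchors per location; shapes follow the op documentation above.
    scores = fluid.layers.data(name='scores', shape=[4, 16, 16], dtype='float32')
    bbox_deltas = fluid.layers.data(name='bbox_deltas', shape=[16, 16, 16], dtype='float32')
    im_info = fluid.layers.data(name='im_info', shape=[3], dtype='float32')
    anchors = fluid.layers.data(name='anchors', shape=[4, 16, 16, 4],
                                dtype='float32', append_batch_size=False)
    variances = fluid.layers.data(name='variances', shape=[4, 16, 16, 4],
                                  dtype='float32', append_batch_size=False)

    rois, roi_probs = fluid.layers.generate_proposals(
        scores=scores, bbox_deltas=bbox_deltas, im_info=im_info,
        anchors=anchors, variances=variances,
        pre_nms_top_n=6000,   # pre_nms_topN: candidates kept before NMS
        post_nms_top_n=1000,  # post_nms_topN: proposals kept after NMS
        nms_thresh=0.7, min_size=3.0, eta=1.0)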
From aa35aaa1ab71216c9902820c649a6a8db41303cc Mon Sep 17 00:00:00 2001
From: Tomasz Patejko
Date: Sat, 20 Oct 2018 23:16:56 +0200
Subject: [PATCH 49/53] MKLDNN conv + elementwise_add fusion: fixing formatting

test=develop
---
 paddle/fluid/inference/analysis/analyzer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h
index c92b8694a0..165e12194b 100644
--- a/paddle/fluid/inference/analysis/analyzer.h
+++ b/paddle/fluid/inference/analysis/analyzer.h
@@ -79,7 +79,7 @@ class Analyzer : public OrderedRegistry<PassManager> {
       "conv_bn_fuse_pass",                      //
       "conv_eltwiseadd_bn_fuse_pass",           //
 #ifdef PADDLE_WITH_MKLDNN
-      "conv_bias_mkldnn_fuse_pass",  //
+      "conv_bias_mkldnn_fuse_pass",             //
       "conv_relu_mkldnn_fuse_pass",             //
       "conv_elementwise_add_mkldnn_fuse_pass",  //
 #endif

From 82d2903b635ab724ea3af7e235d77e5d44e09d1a Mon Sep 17 00:00:00 2001
From: chengduozh
Date: Sun, 21 Oct 2018 17:07:12 +0800
Subject: [PATCH 50/53] Fix fast ParallelExe bug

test=develop
---
 paddle/fluid/framework/details/var_handle.h |  2 ++
 paddle/fluid/framework/parallel_executor.cc |  6 ++++++
 paddle/fluid/platform/device_context.cc     | 10 ++++++++++
 paddle/fluid/platform/device_context.h      |  3 +++
 4 files changed, 21 insertions(+)

diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h
index d8c2bc40b9..a1f458c660 100644
--- a/paddle/fluid/framework/details/var_handle.h
+++ b/paddle/fluid/framework/details/var_handle.h
@@ -49,6 +49,8 @@ struct VarHandleBase {
 
   void AddOutput(OpHandleBase* out, ir::Node* node) {
     if (pending_ops_.find(out) == pending_ops_.end()) {
+      PADDLE_ENFORCE(out != nullptr, "The output of %s should not be nullptr",
+                     this->Node()->Name());
       pending_ops_.insert(out);
       node_->outputs.push_back(node);
     }
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index e8adabd265..093108cb54 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -299,6 +299,12 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
 }
 
 ParallelExecutor::~ParallelExecutor() {
+  const auto dev_ctxs =
+      platform::DeviceContextPool::Instance().GetAllDeviceContexts();
+  for (auto &dev_ctx : dev_ctxs) {
+    dev_ctx->Wait();
+  }
+
   if (member_->own_local_scope_) {
     for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
       Scope *local_scope = member_->local_scopes_[i];
diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc
index 4286242b2a..7d1cf57253 100644
--- a/paddle/fluid/platform/device_context.cc
+++ b/paddle/fluid/platform/device_context.cc
@@ -35,6 +35,16 @@ platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) {
   return it->second.get();
 }
 
+const std::vector<const platform::DeviceContext*>
+DeviceContextPool::GetAllDeviceContexts() const {
+  std::vector<const platform::DeviceContext*> all_device_ctx;
+  all_device_ctx.reserve(device_contexts_.size());
+  for (auto& dev_ctx : device_contexts_) {
+    all_device_ctx.emplace_back(dev_ctx.second.get());
+  }
+  return all_device_ctx;
+}
+
 DeviceContextPool::DeviceContextPool(
     const std::vector<platform::Place>& places) {
   PADDLE_ENFORCE_GT(places.size(), 0);
diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h
index e1ff1a1746..999bbe00f1 100644
--- a/paddle/fluid/platform/device_context.h
+++ b/paddle/fluid/platform/device_context.h
@@ -217,6 +217,9 @@ class DeviceContextPool {
   /*! \brief  Return handle of single device context. */
   platform::DeviceContext* Get(const platform::Place& place);
 
+  /*! \brief  Return all the device contexts. */
+  const std::vector<const platform::DeviceContext*> GetAllDeviceContexts() const;
+
   template <typename Place>
   const typename DefaultDeviceContextType<Place>::TYPE* GetByPlace(
       const Place& place) {

From 58c027cc38189114f584d7f7b732211ac523b686 Mon Sep 17 00:00:00 2001
From: gongweibao
Date: Mon, 22 Oct 2018 14:40:09 +0800
Subject: [PATCH 51/53] Add rpc profiler flags. (#13989)

Add rpc profiler flags
---
 paddle/fluid/operators/distributed/grpc_client.cc | 14 +++++++-------
 paddle/fluid/operators/distributed/grpc_serde.cc  |  4 ++--
 paddle/fluid/platform/profiler.cc                 |  9 +++++++++
 paddle/fluid/platform/profiler.h                  | 10 ++++++++++
 python/paddle/fluid/__init__.py                   |  1 +
 5 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/paddle/fluid/operators/distributed/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc
index 076ecc1f01..f5d5627815 100644
--- a/paddle/fluid/operators/distributed/grpc_client.cc
+++ b/paddle/fluid/operators/distributed/grpc_client.cc
@@ -86,7 +86,7 @@ VarHandlePtr GRPCClient::AsyncSendVar(const std::string& ep,
     // stub context
     s->response_call_back_ = nullptr;
 
-    platform::RecordEvent record_event(method, p_ctx);
+    platform::RecordRPCEvent record_event(method, p_ctx);
 
     auto call = s->stub_g_.PrepareUnaryCall(
         s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_);
@@ -143,7 +143,7 @@ VarHandlePtr GRPCClient::AsyncGetVar(const std::string& ep,
     // stub context
     s->response_call_back_ = ProcGetResponse;
 
-    platform::RecordEvent record_event(method, p_ctx);
+    platform::RecordRPCEvent record_event(method, p_ctx);
 
     auto call = s->stub_g_.PrepareUnaryCall(
         s->context_.get(), "/sendrecv.SendRecvService/GetVariable", buf, &cq_);
@@ -191,7 +191,7 @@ VarHandlePtr GRPCClient::AsyncPrefetchVar(const std::string& ep,
     // stub context
     s->response_call_back_ = ProcGetResponse;
 
-    platform::RecordEvent record_event(method, p_ctx);
+    platform::RecordRPCEvent record_event(method, p_ctx);
 
     auto call = s->stub_g_.PrepareUnaryCall(
         s->context_.get(), "/sendrecv.SendRecvService/PrefetchVariable", req,
        &cq_);
@@ -221,7 +221,7 @@ VarHandlePtr GRPCClient::AsyncSendBatchBarrier(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(BATCH_BARRIER_MESSAGE);
 
-  platform::RecordEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method, nullptr);
 
   auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));
@@ -246,7 +246,7 @@ VarHandlePtr GRPCClient::AsyncSendFetchBarrier(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(FETCH_BARRIER_MESSAGE);
 
-  platform::RecordEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method, nullptr);
 
   auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));
@@ -271,7 +271,7 @@ VarHandlePtr GRPCClient::AsyncSendComplete(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(COMPLETE_MESSAGE);
 
-  platform::RecordEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method, nullptr);
 
   auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));
@@ -301,7 +301,7 @@ VarHandlePtr GRPCClient::AsyncCheckpointNotify(const std::string& ep,
   req.set_varname(CHECKPOINT_SAVE_MESSAGE);
   req.set_out_varname(dir);
 
-  platform::RecordEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method, nullptr);
 
   auto rpc = s->stub_->AsyncCheckpointNotify(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));
diff --git a/paddle/fluid/operators/distributed/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc_serde.cc
index ffe8f082db..bac098b892 100644
--- a/paddle/fluid/operators/distributed/grpc_serde.cc
+++ b/paddle/fluid/operators/distributed/grpc_serde.cc
@@ -36,7 +36,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
                            const platform::DeviceContext& ctx,
                            ::grpc::ByteBuffer* msg,
                            const std::string& out_name) {
-  platform::RecordEvent record_event("serial", &ctx);
+  platform::RecordRPCEvent record_event("serial", &ctx);
   // Default DestroyCallback does nothing, When using GPU
   // the CPU buffer need to be freed.
   DestroyCallback destroy_callback = [](void* backing) {};
@@ -148,7 +148,7 @@ void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg,
                                const platform::DeviceContext& ctx,
                                const framework::Scope* scope,
                                framework::Variable** var) {
-  platform::RecordEvent record_event("deserial", &ctx);
+  platform::RecordRPCEvent record_event("deserial", &ctx);
   operators::distributed::GRPCVariableResponse resp(scope, &ctx);
   PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!");
   *var = resp.GetVar();
diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc
index a35147da90..da46a1abe1 100644
--- a/paddle/fluid/platform/profiler.cc
+++ b/paddle/fluid/platform/profiler.cc
@@ -30,6 +30,8 @@ limitations under the License. */
 #include "paddle/fluid/platform/device_tracer.h"
 #include "paddle/fluid/string/printf.h"
 
+DEFINE_bool(enable_rpc_profiler, false, "Enable rpc profiler or not.");
+
 namespace paddle {
 namespace platform {
 
@@ -193,6 +195,13 @@ RecordEvent::~RecordEvent() {
   PopEvent(name_, dev_ctx_);
 }
 
+RecordRPCEvent::RecordRPCEvent(const std::string& name,
+                               const DeviceContext* dev_ctx) {
+  if (FLAGS_enable_rpc_profiler) {
+    event_.reset(new platform::RecordEvent(name, dev_ctx));
+  }
+}
+
 RecordBlock::RecordBlock(int block_id)
     : is_enabled_(false), start_ns_(PosixInNsec()) {
   std::lock_guard<std::mutex> l(profiler_mu);
diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h
index 62c1762f32..e8eae874af 100644
--- a/paddle/fluid/platform/profiler.h
+++ b/paddle/fluid/platform/profiler.h
@@ -87,6 +87,16 @@ struct RecordEvent {
   std::string full_name_;
 };
 
+class RecordRPCEvent {
+ public:
+  // dev_ctx can be set to nullptr if device is cpu.
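+  // Unlike RecordEvent, this guard only becomes active when the runtime
+  // flag FLAGS_enable_rpc_profiler is set (see profiler.cc above);
+  // otherwise event_ stays null and construction/destruction do nothing.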
+  RecordRPCEvent(const std::string& name, const DeviceContext* dev_ctx);
+  ~RecordRPCEvent() {}
+
+ private:
+  std::unique_ptr<RecordEvent> event_;
+};
+
 struct RecordBlock {
   explicit RecordBlock(int block_id);
   ~RecordBlock();
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 41678918b8..bcd4e4f607 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -120,6 +120,7 @@ def __bootstrap__():
         read_env_flags.append('rpc_deadline')
         read_env_flags.append('rpc_server_profile_period')
         read_env_flags.append('rpc_server_profile_path')
+        read_env_flags.append('enable_rpc_profiler')
 
     if core.is_compiled_with_cuda():
         read_env_flags += [
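With `enable_rpc_profiler` registered in `read_env_flags`, RPC profiling can be toggled from the environment without code changes. A minimal sketch; the surrounding training script is an illustrative assumption:

    import os

    # FLAGS_* environment variables are consumed by fluid's __bootstrap__(),
    # so the flag must be set before the first `import paddle.fluid`.
    os.environ['FLAGS_enable_rpc_profiler'] = 'true'

    import paddle.fluid as fluid  # RecordRPCEvent guards are active from here on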
+ """ return self.main_program.current_block().create_var( name=unique_name.generate(".".join([self.name, 'tmp'])), dtype=dtype, + type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, stop_gradient=stop_gradient) @@ -388,7 +397,7 @@ class LayerHelper(object): b = self.create_parameter( attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True) - tmp = self.create_tmp_variable(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type='elementwise_add', inputs={'X': [input_var], @@ -414,7 +423,7 @@ class LayerHelper(object): tmp = input_var # NOTE(dzhwinter): some activation support inplace compution. if not core.IsInplace(act_type): - tmp = self.create_tmp_variable(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type=act_type, inputs={"X": [input_var]}, diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 4af97e8632..459be4339b 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -80,8 +80,8 @@ def split_lod_tensor(input, mask, level=0): """ helper = LayerHelper('split_lod_tensor', **locals()) - out_true = helper.create_tmp_variable(dtype=input.dtype) - out_false = helper.create_tmp_variable(dtype=input.dtype) + out_true = helper.create_variable_for_type_inference(dtype=input.dtype) + out_false = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='split_lod_tensor', inputs={ @@ -131,7 +131,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): in_true=out_true, in_false=out_false, mask=y, x=x, level=level) """ helper = LayerHelper('merge_lod_tensor', **locals()) - out = helper.create_tmp_variable(dtype=in_true.dtype) + out = helper.create_variable_for_type_inference(dtype=in_true.dtype) helper.append_op( type='merge_lod_tensor', inputs={'X': x, @@ -524,7 +524,7 @@ class StaticRNN(object): if not isinstance(o, Variable): raise TypeError("step output takes a Variable") - tmp_o = self.helper.create_tmp_variable(dtype=o.dtype) + tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype) self.helper.append_op( type='rnn_memory_helper', inputs={'X': [o]}, @@ -606,7 +606,8 @@ class StaticRNN(object): pre_memories.append(mem.pre_mem.name) mem_var = rnn_block.var(mem.mem.name) assert isinstance(mem_var, Variable) - new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype) + new_mem = self.helper.create_variable_for_type_inference( + dtype=mem_var.dtype) rnn_block.append_op( type='rnn_memory_helper', @@ -813,7 +814,7 @@ def max_sequence_len(rank_table): ${out_comment}. 
""" helper = LayerHelper("max_seqence_len", **locals()) - res = helper.create_tmp_variable(dtype="int64") + res = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="max_sequence_len", inputs={"RankTable": rank_table}, @@ -884,7 +885,7 @@ def array_to_lod_tensor(x, table): lod_tensor = fluid.layers.array_to_lod_tensor(array, table) """ helper = LayerHelper("array_to_lod_tensor", **locals()) - tmp = helper.create_tmp_variable(dtype=x.dtype) + tmp = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="array_to_lod_tensor", inputs={'X': x, @@ -915,7 +916,7 @@ def increment(x, value=1.0, in_place=True): """ helper = LayerHelper("increment", **locals()) if not in_place: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = x helper.append_op( @@ -1012,7 +1013,7 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored): """ helper = LayerHelper("less_than", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True attrs = dict() @@ -1051,7 +1052,7 @@ def equal(x, y, cond=None, **ignored): """ helper = LayerHelper("equal", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True helper.append_op( @@ -1098,7 +1099,7 @@ def array_read(array, i): array, Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY: raise TypeError("array should be tensor array vairable") - out = helper.create_tmp_variable(dtype=array.dtype) + out = helper.create_variable_for_type_inference(dtype=array.dtype) helper.append_op( type='read_from_array', inputs={'X': [array], @@ -1133,7 +1134,7 @@ def shrink_memory(x, i, table): usage. 
""" helper = LayerHelper('shrink_memory', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='shrink_rnn_memory', inputs={'X': [x], @@ -1170,7 +1171,7 @@ def array_length(array): """ helper = LayerHelper('array_length', **locals()) - tmp = helper.create_tmp_variable(dtype='int64') + tmp = helper.create_variable_for_type_inference(dtype='int64') tmp.stop_gradient = True helper.append_op( type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}) @@ -1590,7 +1591,7 @@ class DynamicRNN(object): self.mem_dict = dict() self.output_array = [] self.outputs = [] - self.cond = self.helper.create_tmp_variable(dtype='bool') + self.cond = self.helper.create_variable_for_type_inference(dtype='bool') self.cond.stop_gradient = False self.while_op = While(self.cond) self.input_array = [] @@ -1924,7 +1925,7 @@ def reorder_lod_tensor_by_rank(x, rank_table): helper.is_instance('x', Variable) helper.is_instance('rank_table', Variable) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='reorder_lod_tensor_by_rank', inputs={'X': [x], @@ -1958,7 +1959,7 @@ def is_empty(x, cond=None, **ignored): """ helper = LayerHelper("is_empty", **locals()) if cond is None: - cond = helper.create_tmp_variable(dtype='bool') + cond = helper.create_variable_for_type_inference(dtype='bool') cond.stop_gradient = True elif not isinstance(cond, Variable): raise TypeError("cond takes a variable") diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 1cfcbbb9c1..b94b59631a 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -147,10 +147,11 @@ def rpn_target_assign(bbox_pred, helper = LayerHelper('rpn_target_assign', **locals()) # Assign target label to anchors - loc_index = helper.create_tmp_variable(dtype='int32') - score_index = helper.create_tmp_variable(dtype='int32') - target_label = helper.create_tmp_variable(dtype='int32') - target_bbox = helper.create_tmp_variable(dtype=anchor_box.dtype) + loc_index = helper.create_variable_for_type_inference(dtype='int32') + score_index = helper.create_variable_for_type_inference(dtype='int32') + target_label = helper.create_variable_for_type_inference(dtype='int32') + target_bbox = helper.create_variable_for_type_inference( + dtype=anchor_box.dtype) helper.append_op( type="rpn_target_assign", inputs={ @@ -282,7 +283,8 @@ def detection_output(loc, scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape) scores = nn.transpose(scores, perm=[0, 2, 1]) scores.stop_gradient = True - nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype) + nmsed_outs = helper.create_variable_for_type_inference( + dtype=decoded_box.dtype) helper.append_op( type="multiclass_nms", inputs={'Scores': scores, @@ -314,7 +316,7 @@ def iou_similarity(x, y, name=None): """ helper = LayerHelper("iou_similarity", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -351,7 +353,8 @@ def box_coder(prior_box, helper = LayerHelper("box_coder", **locals()) if name is None: - output_box = helper.create_tmp_variable(dtype=prior_box.dtype) + output_box = helper.create_variable_for_type_inference( + dtype=prior_box.dtype) else: output_box = 
helper.create_variable( name=name, dtype=prior_box.dtype, persistable=False) @@ -382,7 +385,7 @@ def polygon_box_transform(input, name=None): """ helper = LayerHelper("polygon_box_transform", **locals()) if name is None: - output = helper.create_tmp_variable(dtype=input.dtype) + output = helper.create_variable_for_type_inference(dtype=input.dtype) else: output = helper.create_variable( name=name, dtype=prior_box.input, persistable=False) @@ -450,7 +453,7 @@ def detection_map(detect_res, helper = LayerHelper("detection_map", **locals()) def __create_var(type): - return helper.create_tmp_variable(dtype=type) + return helper.create_variable_for_type_inference(dtype=type) map_out = __create_var('float32') accum_pos_count_out = out_states[0] if out_states else __create_var('int32') @@ -557,8 +560,9 @@ def bipartite_match(dist_matrix, >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou) """ helper = LayerHelper('bipartite_match', **locals()) - match_indices = helper.create_tmp_variable(dtype='int32') - match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype) + match_indices = helper.create_variable_for_type_inference(dtype='int32') + match_distance = helper.create_variable_for_type_inference( + dtype=dist_matrix.dtype) helper.append_op( type='bipartite_match', inputs={'DistMat': dist_matrix}, @@ -644,8 +648,8 @@ def target_assign(input, gt, matched_indices, mismatch_value=0) """ helper = LayerHelper('target_assign', **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) - out_weight = helper.create_tmp_variable(dtype='float32') + out = helper.create_variable_for_type_inference(dtype=input.dtype) + out_weight = helper.create_variable_for_type_inference(dtype='float32') helper.append_op( type='target_assign', inputs={ @@ -816,9 +820,10 @@ def ssd_loss(location, conf_loss = nn.reshape( x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape) conf_loss.stop_gradient = True - neg_indices = helper.create_tmp_variable(dtype='int32') + neg_indices = helper.create_variable_for_type_inference(dtype='int32') dtype = matched_indices.dtype - updated_matched_indices = helper.create_tmp_variable(dtype=dtype) + updated_matched_indices = helper.create_variable_for_type_inference( + dtype=dtype) helper.append_op( type='mine_hard_examples', inputs={ @@ -998,8 +1003,8 @@ def prior_box(input, max_sizes = [max_sizes] attrs['max_sizes'] = max_sizes - box = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) + box = helper.create_variable_for_type_inference(dtype) + var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="prior_box", inputs={"Input": input, @@ -1337,8 +1342,8 @@ def anchor_generator(input, 'offset': offset } - anchor = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) + anchor = helper.create_variable_for_type_inference(dtype) + var = helper.create_variable_for_type_inference(dtype) helper.append_op( type="anchor_generator", inputs={"Input": input}, @@ -1384,7 +1389,7 @@ def roi_perspective_transform(input, """ helper = LayerHelper('roi_perspective_transform', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="roi_perspective_transform", inputs={"X": input, @@ -1418,11 +1423,15 @@ def generate_proposal_labels(rpn_rois, helper = LayerHelper('generate_proposal_labels', **locals()) - rois = helper.create_tmp_variable(dtype=rpn_rois.dtype) - labels_int32 = 
helper.create_tmp_variable(dtype=gt_classes.dtype) - bbox_targets = helper.create_tmp_variable(dtype=rpn_rois.dtype) - bbox_inside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype) - bbox_outside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype) + rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype) + labels_int32 = helper.create_variable_for_type_inference( + dtype=gt_classes.dtype) + bbox_targets = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) + bbox_inside_weights = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) + bbox_outside_weights = helper.create_variable_for_type_inference( + dtype=rpn_rois.dtype) helper.append_op( type="generate_proposal_labels", @@ -1504,8 +1513,10 @@ def generate_proposals(scores, """ helper = LayerHelper('generate_proposals', **locals()) - rpn_rois = helper.create_tmp_variable(dtype=bbox_deltas.dtype) - rpn_roi_probs = helper.create_tmp_variable(dtype=scores.dtype) + rpn_rois = helper.create_variable_for_type_inference( + dtype=bbox_deltas.dtype) + rpn_roi_probs = helper.create_variable_for_type_inference( + dtype=scores.dtype) helper.append_op( type="generate_proposals", inputs={ diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index dcd5a064a8..95e13669ad 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -954,7 +954,7 @@ def read_file(reader): """ helper = LayerHelper('read_file') out = [ - helper.create_tmp_variable( + helper.create_variable_for_type_inference( stop_gradient=True, dtype='float32') for _ in range(len(reader.desc.shapes())) ] diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 8c11921d9b..eea0a362a0 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -202,10 +202,12 @@ def generate_layer_fn(op_type): out_var = out[0] if (isinstance(out, list) or isinstance(out, tuple)) else out else: - out_var = helper.create_tmp_variable(dtype=dtype) + out_var = helper.create_variable_for_type_inference(dtype=dtype) outputs[o_name] = [out_var] for name in intermediate_output_names: - outputs[name] = [helper.create_tmp_variable(dtype=dtype)] + outputs[name] = [ + helper.create_variable_for_type_inference(dtype=dtype) + ] helper.append_op( type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs) return helper.append_activation(out_var) @@ -229,7 +231,7 @@ def generate_layer_fn_noattr(op_type): def func(x, name=None): helper = LayerHelper(op_type, **locals()) - output = helper.create_tmp_variable(dtype=x.dtype) + output = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output}) return output diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py index a3064b565d..b2d2c93ead 100644 --- a/python/paddle/fluid/layers/metric_op.py +++ b/python/paddle/fluid/layers/metric_op.py @@ -58,11 +58,11 @@ def accuracy(input, label, k=1, correct=None, total=None): """ helper = LayerHelper("accuracy", **locals()) topk_out, topk_indices = nn.topk(input, k=k) - acc_out = helper.create_tmp_variable(dtype="float32") + acc_out = helper.create_variable_for_type_inference(dtype="float32") if correct is None: - correct = helper.create_tmp_variable(dtype="int64") + correct = helper.create_variable_for_type_inference(dtype="int64") if total is None: - total = 
helper.create_tmp_variable(dtype="int64") + total = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="accuracy", inputs={ @@ -124,8 +124,8 @@ def auc(input, auc_out=fluid.layers.auc(input=prediction, label=label) """ helper = LayerHelper("auc", **locals()) - auc_out = helper.create_tmp_variable(dtype="float64") - batch_auc_out = helper.create_tmp_variable(dtype="float64") + auc_out = helper.create_variable_for_type_inference(dtype="float64") + batch_auc_out = helper.create_variable_for_type_inference(dtype="float64") # make tp, tn, fp, fn persistable, so that can accumulate all batches. # for batch auc diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 538035de1a..d8e497731a 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -242,7 +242,7 @@ def fc(input, w = helper.create_parameter( attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type="mul", inputs={"X": input_var, @@ -255,7 +255,7 @@ def fc(input, if len(mul_results) == 1: pre_bias = mul_results[0] else: - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sum", inputs={"X": mul_results}, @@ -314,7 +314,7 @@ def embedding(input, helper = LayerHelper('embedding', **locals()) w = helper.create_parameter( attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False) - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( size[0] + padding_idx) helper.append_op( @@ -418,10 +418,10 @@ def dynamic_lstm(input, bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - hidden = helper.create_tmp_variable(dtype) - cell = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_cell_pre_act = helper.create_tmp_variable(dtype) + hidden = helper.create_variable_for_type_inference(dtype) + cell = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_cell_pre_act = helper.create_variable_for_type_inference(dtype) inputs = {'Input': input, 'Weight': weight, 'Bias': bias} batch_size = input.shape[0] if h_0: @@ -621,12 +621,12 @@ def dynamic_lstmp(input, bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - projection = helper.create_tmp_variable(dtype) - cell = helper.create_tmp_variable(dtype) - ordered_proj0 = helper.create_tmp_variable(dtype) - batch_hidden = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - batch_cell_pre_act = helper.create_tmp_variable(dtype) + projection = helper.create_variable_for_type_inference(dtype) + cell = helper.create_variable_for_type_inference(dtype) + ordered_proj0 = helper.create_variable_for_type_inference(dtype) + batch_hidden = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_cell_pre_act = helper.create_variable_for_type_inference(dtype) helper.append_op( type='lstmp', @@ -751,10 +751,10 @@ def dynamic_gru(input, ), 'The shape of h0 should be(batch_size, %d)' % size inputs['H0'] = h_0 - hidden = helper.create_tmp_variable(dtype) - batch_gate = helper.create_tmp_variable(dtype) - 
batch_reset_hidden_prev = helper.create_tmp_variable(dtype) - batch_hidden = helper.create_tmp_variable(dtype) + hidden = helper.create_variable_for_type_inference(dtype) + batch_gate = helper.create_variable_for_type_inference(dtype) + batch_reset_hidden_prev = helper.create_variable_for_type_inference(dtype) + batch_hidden = helper.create_variable_for_type_inference(dtype) helper.append_op( type='gru', @@ -844,9 +844,9 @@ def gru_unit(input, weight = helper.create_parameter( attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) - gate = helper.create_tmp_variable(dtype) - reset_hidden_pre = helper.create_tmp_variable(dtype) - updated_hidden = helper.create_tmp_variable(dtype) + gate = helper.create_variable_for_type_inference(dtype) + reset_hidden_pre = helper.create_variable_for_type_inference(dtype) + updated_hidden = helper.create_variable_for_type_inference(dtype) inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight} # create bias if helper.bias_attr: @@ -896,10 +896,14 @@ def linear_chain_crf(input, label, param_attr=None): attr=helper.param_attr, shape=[size + 2, size], dtype=helper.input_dtype()) - alpha = helper.create_tmp_variable(dtype=helper.input_dtype()) - emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype()) - log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype()) + alpha = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + emission_exps = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + transition_exps = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + log_likelihood = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) helper.append_op( type='linear_chain_crf', inputs={"Emission": [input], @@ -938,7 +942,8 @@ def crf_decoding(input, param_attr, label=None): """ helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) - viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) + viterbi_path = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) helper.append_op( type='crf_decoding', inputs={"Emission": [input], @@ -962,9 +967,9 @@ def cos_sim(X, Y): Variable: the output of cosine(X, Y). 
""" helper = LayerHelper('cos_sim', **locals()) - out = helper.create_tmp_variable(dtype=X.dtype) - xnorm = helper.create_tmp_variable(dtype=X.dtype) - ynorm = helper.create_tmp_variable(dtype=X.dtype) + out = helper.create_variable_for_type_inference(dtype=X.dtype) + xnorm = helper.create_variable_for_type_inference(dtype=X.dtype) + ynorm = helper.create_variable_for_type_inference(dtype=X.dtype) helper.append_op( type='cos_sim', inputs={'X': [X], @@ -1008,8 +1013,9 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None): """ helper = LayerHelper('dropout', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) - mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + mask = helper.create_variable_for_type_inference( + dtype=x.dtype, stop_gradient=True) if (seed is None or seed == 0) and helper.main_program.random_seed != 0: seed = helper.main_program.random_seed @@ -1094,7 +1100,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=-100): cost = fluid.layers.cross_entropy(input=predict, label=label) """ helper = LayerHelper('cross_entropy', **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='cross_entropy', inputs={'X': [input], @@ -1141,14 +1147,14 @@ def square_error_cost(input, label): """ helper = LayerHelper('square_error_cost', **locals()) - minus_out = helper.create_tmp_variable(dtype=input.dtype) + minus_out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='elementwise_sub', inputs={'X': [input], 'Y': [label]}, outputs={'Out': [minus_out]}) - square_out = helper.create_tmp_variable(dtype=input.dtype) + square_out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='square', inputs={'X': [minus_out]}, outputs={'Out': [square_out]}) @@ -1254,12 +1260,13 @@ def chunk_eval(input, helper = LayerHelper("chunk_eval", **locals()) # prepare output - precision = helper.create_tmp_variable(dtype="float32") - recall = helper.create_tmp_variable(dtype="float32") - f1_score = helper.create_tmp_variable(dtype="float32") - num_infer_chunks = helper.create_tmp_variable(dtype="int64") - num_label_chunks = helper.create_tmp_variable(dtype="int64") - num_correct_chunks = helper.create_tmp_variable(dtype="int64") + precision = helper.create_variable_for_type_inference(dtype="float32") + recall = helper.create_variable_for_type_inference(dtype="float32") + f1_score = helper.create_variable_for_type_inference(dtype="float32") + num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64") + num_label_chunks = helper.create_variable_for_type_inference(dtype="int64") + num_correct_chunks = helper.create_variable_for_type_inference( + dtype="int64") helper.append_op( type="chunk_eval", @@ -1326,7 +1333,7 @@ def sequence_conv(input, filter_shape = [filter_size * input.shape[1], num_filters] filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type='sequence_conv', @@ -1382,7 +1389,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): """ helper = LayerHelper('sequence_softmax', **locals()) dtype = helper.input_dtype() - softmax_out = helper.create_tmp_variable(dtype) + softmax_out = helper.create_variable_for_type_inference(dtype) 
helper.append_op( type="sequence_softmax", inputs={"X": input}, @@ -1436,7 +1443,7 @@ def softmax(input, use_cudnn=True, name=None): """ helper = LayerHelper('softmax', **locals()) dtype = helper.input_dtype() - softmax_out = helper.create_tmp_variable(dtype) + softmax_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="softmax", inputs={"X": input}, @@ -1599,7 +1606,7 @@ def conv2d(input, dtype=dtype, default_initializer=_get_default_param_initializer()) - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type=l_type, @@ -1770,7 +1777,7 @@ def conv3d(input, dtype=dtype, default_initializer=_get_default_param_initializer()) - pre_bias = helper.create_tmp_variable(dtype) + pre_bias = helper.create_variable_for_type_inference(dtype) helper.append_op( type=l_type, @@ -1849,8 +1856,8 @@ def sequence_pool(input, pool_type): """ helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() - pool_out = helper.create_tmp_variable(dtype) - max_index = helper.create_tmp_variable(dtype) + pool_out = helper.create_variable_for_type_inference(dtype) + max_index = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sequence_pool", @@ -1886,7 +1893,7 @@ def sequence_concat(input, name=None): out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3]) """ helper = LayerHelper('sequence_concat', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]}) return out @@ -2013,7 +2020,7 @@ def sequence_slice(input, offset, length, name=None): """ helper = LayerHelper("sequence_slice", **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) offset.stop_gradient = True length.stop_gradient = True @@ -2099,7 +2106,7 @@ def pool2d(input, helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() - pool_out = helper.create_tmp_variable(dtype) + pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=l_type, @@ -2167,7 +2174,7 @@ def pool3d(input, l_type = "pool3d" helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() - pool_out = helper.create_tmp_variable(dtype) + pool_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=l_type, @@ -2310,10 +2317,13 @@ def batch_norm(input, mean_out = mean # variance and variance out share the same memory variance_out = variance - saved_mean = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - saved_variance = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) + saved_mean = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True) + saved_variance = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True) - batch_norm_out = input if in_place else helper.create_tmp_variable(dtype) + batch_norm_out = input if in_place else helper.create_variable_for_type_inference( + dtype) helper.append_op( type="batch_norm", @@ -2430,9 +2440,11 @@ def layer_norm(input, inputs['Bias'] = bias # create output - mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - layer_norm_out = helper.create_tmp_variable(dtype) + mean_out = helper.create_variable_for_type_inference( 
+ dtype=dtype, stop_gradient=True) + variance_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True) + layer_norm_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="layer_norm", @@ -2619,7 +2631,7 @@ def conv2d_transpose(input, img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) - pre_bias = helper.create_tmp_variable(dtype=input.dtype) + pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type=op_type, inputs={'Input': [input], @@ -2797,7 +2809,7 @@ def conv3d_transpose(input, img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) - pre_bias = helper.create_tmp_variable(dtype=input.dtype) + pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type=l_type, inputs={'Input': [input], @@ -2876,7 +2888,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): """ helper = LayerHelper('sequence_expand', input=x, **locals()) dtype = helper.input_dtype() - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type='sequence_expand', inputs={'X': x, @@ -2942,7 +2954,7 @@ def sequence_expand_as(x, y, name=None): """ helper = LayerHelper('sequence_expand_as', input=x, **locals()) dtype = helper.input_dtype() - tmp = helper.create_tmp_variable(dtype) + tmp = helper.create_variable_for_type_inference(dtype) helper.append_op( type='sequence_expand_as', inputs={'X': x, @@ -2987,8 +2999,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): helper = LayerHelper('sequence_pad', input=x, **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) - length = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) + length = helper.create_variable_for_type_inference(dtype) pad_value.stop_gradient = True length.stop_gradient = True @@ -3053,7 +3065,7 @@ def sequence_unpad(x, length, name=None): helper = LayerHelper('sequence_unpad', input=x, **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) length.stop_gradient = True @@ -3152,8 +3164,9 @@ def beam_search(pre_ids, score_type = scores.dtype id_type = ids.dtype - selected_scores = helper.create_tmp_variable(dtype=score_type) - selected_ids = helper.create_tmp_variable(dtype=id_type) + selected_scores = helper.create_variable_for_type_inference( + dtype=score_type) + selected_ids = helper.create_variable_for_type_inference(dtype=id_type) helper.append_op( type='beam_search', @@ -3210,8 +3223,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None): ids, scores, beam_size=5, end_id=0) """ helper = LayerHelper('beam_search_decode', **locals()) - sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) - sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) + sentence_ids = helper.create_variable_for_type_inference(dtype=ids.dtype) + sentence_scores = helper.create_variable_for_type_inference(dtype=ids.dtype) helper.append_op( type="beam_search_decode", @@ -3341,8 +3354,8 @@ def lstm_unit(x_t, param_attr=param_attr, bias_attr=bias_attr) dtype = x_t.dtype - c = helper.create_tmp_variable(dtype) - h = helper.create_tmp_variable(dtype) + c = helper.create_variable_for_type_inference(dtype) + h = helper.create_variable_for_type_inference(dtype) helper.append_op( type='lstm_unit', @@ -3396,7 +3409,7 @@ def 
reduce_sum(input, dim=None, keep_dim=False, name=None): """ helper = LayerHelper('reduce_sum', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( @@ -3453,7 +3466,7 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None): fluid.layers.reduce_mean(x, dim=[0, 1]) # [4.0, 5.0] """ helper = LayerHelper('reduce_mean', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( @@ -3508,7 +3521,7 @@ def reduce_max(input, dim=None, keep_dim=False, name=None): fluid.layers.reduce_max(x, dim=[0, 1]) # [7.0, 8.0] """ helper = LayerHelper('reduce_max', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( @@ -3563,7 +3576,7 @@ def reduce_min(input, dim=None, keep_dim=False, name=None): fluid.layers.reduce_min(x, dim=[0, 1]) # [1.0, 2.0] """ helper = LayerHelper('reduce_min', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( @@ -3619,7 +3632,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None): fluid.layers.reduce_prod(x, dim=[0, 1]) # [105.0, 384.0] """ helper = LayerHelper('reduce_prod', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) if dim is not None and not isinstance(dim, list): dim = [dim] helper.append_op( @@ -3679,7 +3692,7 @@ def split(input, num_or_sections, dim=-1, name=None): dim], 'len(num_or_sections) must not be more than input.shape[dim].' 
num = len(num_or_sections) outs = [ - helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.create_variable_for_type_inference(dtype=helper.input_dtype()) for i in range(num) ] helper.append_op( @@ -3736,8 +3749,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None): axis = 0 helper = LayerHelper("l2_normalize", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) - norm = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + norm = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="norm", inputs={"X": x}, @@ -3846,7 +3859,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): __check_input(x, y) helper = LayerHelper('matmul', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul', inputs={'X': x, @@ -3917,8 +3930,8 @@ def topk(input, k, name=None): top5_values, top5_indices = layers.topk(input, k=5) """ helper = LayerHelper("top_k", **locals()) - values = helper.create_tmp_variable(dtype=input.dtype) - indices = helper.create_tmp_variable(dtype="int64") + values = helper.create_variable_for_type_inference(dtype=input.dtype) + indices = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="top_k", inputs={"X": [input]}, @@ -3976,8 +3989,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None): # remove some tokens from input and labels if ignored_tokens is not None and len(ignored_tokens) > 0: - erased_input = helper.create_tmp_variable(dtype="int64") - erased_label = helper.create_tmp_variable(dtype="int64") + erased_input = helper.create_variable_for_type_inference(dtype="int64") + erased_label = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="sequence_erase", @@ -3994,8 +4007,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None): label = erased_label # edit distance op - edit_distance_out = helper.create_tmp_variable(dtype="int64") - sequence_num = helper.create_tmp_variable(dtype="int64") + edit_distance_out = helper.create_variable_for_type_inference(dtype="int64") + sequence_num = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="edit_distance", inputs={"Hyps": [input], @@ -4070,7 +4083,7 @@ def ctc_greedy_decoder(input, blank, name=None): _, topk_indices = topk(input, k=1) # ctc align op - ctc_out = helper.create_tmp_variable(dtype="int64") + ctc_out = helper.create_variable_for_type_inference(dtype="int64") helper.append_op( type="ctc_align", inputs={"Input": [topk_indices]}, @@ -4120,8 +4133,8 @@ def warpctc(input, label, blank=0, norm_by_times=False): """ helper = LayerHelper('warpctc', **locals()) - loss_out = helper.create_tmp_variable(dtype=input.dtype) - grad_out = helper.create_tmp_variable(dtype=input.dtype) + loss_out = helper.create_variable_for_type_inference(dtype=input.dtype) + grad_out = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type='warpctc', inputs={'Logits': [input], @@ -4182,7 +4195,7 @@ def sequence_reshape(input, new_dim): x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10) """ helper = LayerHelper('sequence_reshape', **locals()) - out = helper.create_tmp_variable(helper.input_dtype()) + out = helper.create_variable_for_type_inference(helper.input_dtype()) helper.append_op( type='sequence_reshape', inputs={'X': [input]}, @@ -4279,9 +4292,9 @@ def 
nce(input, is_bias=True, dtype=input.dtype) inputs['Bias'] = b - cost = helper.create_tmp_variable(dtype=input.dtype) - sample_logits = helper.create_tmp_variable(dtype=input.dtype) - sample_labels = helper.create_tmp_variable(dtype=label.dtype) + cost = helper.create_variable_for_type_inference(dtype=input.dtype) + sample_logits = helper.create_variable_for_type_inference(dtype=input.dtype) + sample_labels = helper.create_variable_for_type_inference(dtype=label.dtype) if num_neg_samples is None: num_neg_samples = 10 @@ -4357,8 +4370,8 @@ def hsigmoid(input, helper = LayerHelper('hierarchical_sigmoid', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) - pre_out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) + pre_out = helper.create_variable_for_type_inference(dtype) dim = input.shape[1] if num_classes < 2: raise ValueError("num_classes must not be less than 2.") @@ -4418,8 +4431,8 @@ def transpose(x, perm, name=None): (idx, perm[idx], len(x.shape))) helper = LayerHelper('transpose', **locals()) - out = helper.create_tmp_variable(x.dtype) - x_shape = helper.create_tmp_variable(x.dtype) + out = helper.create_variable_for_type_inference(x.dtype) + x_shape = helper.create_variable_for_type_inference(x.dtype) helper.append_op( type='transpose2', inputs={'X': [x]}, @@ -4561,7 +4574,7 @@ def im2sequence(input, inputs["Y"] = input_image_size attrs["out_stride"] = out_stride helper = LayerHelper('im2sequence', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out @@ -4594,7 +4607,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None): filter_shape = [future_context_size + 1, input.shape[1]] filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='row_conv', inputs={'X': [input], @@ -4627,7 +4640,7 @@ def multiplex(inputs, index): raise ValueError("inputs should be a list object and contains at least " "2 elements.") - out = helper.create_tmp_variable(inputs[0].dtype) + out = helper.create_variable_for_type_inference(inputs[0].dtype) helper.append_op( type='multiplex', inputs={'X': inputs, @@ -4698,8 +4711,8 @@ def softmax_with_cross_entropy(logits, logits=fc, label=label) """ helper = LayerHelper('softmax_with_cross_entropy', **locals()) - softmax = helper.create_tmp_variable(dtype=logits.dtype) - loss = helper.create_tmp_variable(dtype=logits.dtype) + softmax = helper.create_variable_for_type_inference(dtype=logits.dtype) + loss = helper.create_variable_for_type_inference(dtype=logits.dtype) helper.append_op( type='softmax_with_cross_entropy', inputs={'Logits': logits, @@ -4749,8 +4762,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): """ helper = LayerHelper('smooth_l1_loss', **locals()) - diff = helper.create_tmp_variable(dtype=x.dtype) - loss = helper.create_tmp_variable(dtype=x.dtype) + diff = helper.create_variable_for_type_inference(dtype=x.dtype) + loss = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='smooth_l1_loss', inputs={ @@ -4783,7 +4796,7 @@ def one_hot(input, depth): one_hot_label = layers.one_hot(input=label, depth=10) """ helper = LayerHelper("one_hot", **locals()) - 
one_hot_out = helper.create_tmp_variable(dtype='float32') + one_hot_out = helper.create_variable_for_type_inference(dtype='float32') helper.append_op( type="one_hot", inputs={'X': input}, @@ -4925,8 +4938,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): "except one unknown dimension.") helper = LayerHelper("reshape2", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) - x_shape = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + x_shape = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="reshape2", inputs=inputs, @@ -4975,8 +4988,8 @@ def squeeze(input, axes, name=None): y = layers.squeeze(input=x, axes=[1]) """ helper = LayerHelper("squeeze", **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) - x_shape = helper.create_tmp_variable(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=input.dtype) + x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="squeeze2", inputs={"X": input}, @@ -5012,8 +5025,8 @@ def unsqueeze(input, axes, name=None): y = layers.unsqueeze(input=x, axes=[1]) """ helper = LayerHelper("unsqueeze", **locals()) - out = helper.create_tmp_variable(dtype=input.dtype) - x_shape = helper.create_tmp_variable(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=input.dtype) + x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( type="unsqueeze2", inputs={"X": input}, @@ -5103,7 +5116,7 @@ def lod_reset(x, y=None, target_lod=None): out = layers.lod_reset(x=x, y=y) """ helper = LayerHelper("lod_reset", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) if y is not None: helper.append_op( type="lod_reset", inputs={'X': x, @@ -5172,8 +5185,9 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): "dims of input must be 4(not %d), and it's order must be NCHW" % (dims)) - mid_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) - lrn_out = helper.create_tmp_variable(dtype) + mid_out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=True) + lrn_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="lrn", inputs={"X": input}, @@ -5238,7 +5252,7 @@ def pad(x, paddings, pad_value=0., name=None): """ helper = LayerHelper('pad', input=x, **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad', inputs={'X': x}, @@ -5318,7 +5332,7 @@ def pad_constant_like(x, y, pad_value=0., name=None): """ helper = LayerHelper('pad_constant_like', input=x, **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad_constant_like', inputs={'X': x, @@ -5383,7 +5397,7 @@ def label_smooth(label, raise ValueError("The value of epsilon must be between 0 and 1.") helper = LayerHelper("label_smooth", **locals()) label.stop_gradient = True - smooth_label = helper.create_tmp_variable(dtype) + smooth_label = helper.create_variable_for_type_inference(dtype) helper.append_op( type="label_smooth", inputs={"X": label, @@ -5415,8 +5429,8 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): """ helper = LayerHelper('roi_pool', **locals()) dtype = helper.input_dtype()
- pool_out = helper.create_tmp_variable(dtype) - argmaxes = helper.create_tmp_variable(dtype='int32') + pool_out = helper.create_variable_for_type_inference(dtype) + argmaxes = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type="roi_pool", inputs={"X": input, @@ -5589,7 +5603,7 @@ def image_resize(input, out_h = int(input.shape[2] * scale) out_w = int(input.shape[3] * scale) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type=resample_methods[resample], inputs=inputs, @@ -5698,7 +5712,7 @@ def gather(input, index): """ helper = LayerHelper('gather', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="gather", inputs={"X": input, @@ -5738,7 +5752,7 @@ def scatter(input, index, updates, name=None): """ helper = LayerHelper('scatter', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="scatter", inputs={"X": input, @@ -5798,7 +5812,7 @@ def sequence_scatter(input, index, updates, name=None): """ helper = LayerHelper('sequence_scatter', **locals()) dtype = helper.input_dtype() - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="sequence_scatter", inputs={"X": input, @@ -5828,7 +5842,7 @@ def random_crop(x, shape, seed=None): """ helper = LayerHelper("random_crop", **locals()) dtype = x.dtype - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) if seed is None: seed = np.random.randint(-65536, 65536) op_attrs = {"shape": shape} @@ -5874,7 +5888,7 @@ def log(x, name=None): """ helper = LayerHelper('log', **locals()) dtype = helper.input_dtype(input_param_name='x') - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out}) return out @@ -5905,7 +5919,7 @@ def relu(x, name=None): """ helper = LayerHelper('relu', **locals()) dtype = helper.input_dtype(input_param_name='x') - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out}) return out @@ -5944,9 +5958,9 @@ def mean_iou(input, label, num_classes): """ helper = LayerHelper('mean_iou', **locals()) dtype = helper.input_dtype() - out_mean_iou = helper.create_tmp_variable(dtype='float32') - out_wrong = helper.create_tmp_variable(dtype='int32') - out_correct = helper.create_tmp_variable(dtype='int32') + out_mean_iou = helper.create_variable_for_type_inference(dtype='float32') + out_wrong = helper.create_variable_for_type_inference(dtype='int32') + out_correct = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type="mean_iou", inputs={"Predictions": input, @@ -6038,7 +6052,7 @@ def crop(x, shape=None, offsets=None, name=None): if offsets is None: offsets = [0] * len(x.shape) - out = helper.create_tmp_variable(x.dtype) + out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x} attrs = {} if isinstance(shape, Variable): @@ -6118,7 +6132,7 @@ def rank_loss(label, left, right, name=None): if not (isinstance(right, Variable)): raise ValueError("The right should be a Variable") - out = helper.create_tmp_variable("float32") + out = 
helper.create_variable_for_type_inference("float32") helper.append_op( type='rank_loss', @@ -6164,8 +6178,8 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None): raise ValueError("The left should be a Variable.") if not isinstance(right, Variable): raise ValueError("The right should be a Variable.") - out = helper.create_tmp_variable(left.dtype) - act = helper.create_tmp_variable(left.dtype) + out = helper.create_variable_for_type_inference(left.dtype) + act = helper.create_variable_for_type_inference(left.dtype) helper.append_op( type='margin_rank_loss', inputs={"Label": label, @@ -6250,7 +6264,7 @@ def pad2d(input, helper = LayerHelper('pad2d', **locals()) dtype = helper.input_dtype(input_param_name='input') - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='pad2d', inputs={'X': input}, @@ -6279,7 +6293,7 @@ def elu(x, alpha=1.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('elu', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='elu', inputs={'X': x}, @@ -6302,7 +6316,7 @@ def relu6(x, threshold=6.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('relu6', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='relu6', inputs={'X': x}, @@ -6325,7 +6339,7 @@ def pow(x, factor=1.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('pow', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='pow', inputs={'X': x}, @@ -6349,7 +6363,7 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('stanh', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='stanh', inputs={'X': x}, @@ -6374,7 +6388,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('hard_sigmoid', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='hard_sigmoid', inputs={'X': x}, @@ -6398,7 +6412,7 @@ def swish(x, beta=1.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('swish', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='swish', inputs={'X': x}, @@ -6450,7 +6464,7 @@ def prelu(x, mode, param_attr=None, name=None): dtype='float32', is_bias=False, default_initializer=Constant(1.0)) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="prelu", inputs={"X": x, @@ -6474,7 +6488,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('brelu', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='brelu', inputs={'X': x}, @@ -6497,7 +6511,7 @@ def leaky_relu(x, alpha=0.02, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('leaky_relu', **locals()) - out = 
helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='leaky_relu', inputs={'X': x}, @@ -6519,7 +6533,7 @@ def soft_relu(x, threshold=40.0, name=None): output(${out_type}): ${out_comment} """ helper = LayerHelper('soft_relu', **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='soft_relu', inputs={'X': x}, @@ -6586,8 +6600,8 @@ def flatten(x, axis=1, name=None): if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0: raise ValueError("The axis should be an int, and in range [0, rank(x)]") - out = helper.create_tmp_variable(x.dtype) - x_shape = helper.create_tmp_variable(x.dtype) + out = helper.create_variable_for_type_inference(x.dtype) + x_shape = helper.create_variable_for_type_inference(x.dtype) helper.append_op( type='flatten2', inputs={"X": x}, @@ -6633,7 +6647,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0) """ helper = LayerHelper('sequence_enumerate', **locals()) - out = helper.create_tmp_variable(helper.input_dtype(), stop_gradient=True) + out = helper.create_variable_for_type_inference( + helper.input_dtype(), stop_gradient=True) helper.append_op( type='sequence_enumerate', inputs={'X': input}, @@ -6673,9 +6688,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None): helper = LayerHelper('sequence_mask', **locals()) if name is None: - out = helper.create_tmp_variable(dtype=dtype) + out = helper.create_variable_for_type_inference(dtype=dtype) else: - out = helper.create_tmp_variable(dtype=dtype, name=name) + out = helper.create_variable_for_type_inference(dtype=dtype, name=name) helper.append_op( type='sequence_mask', @@ -6718,7 +6733,7 @@ def stack(x, axis=0): if not isinstance(x, list) and not isinstance(x, tuple): x = [x] - out = helper.create_tmp_variable(x[0].dtype) + out = helper.create_variable_for_type_inference(x[0].dtype) helper.append_op( type='stack', inputs={'X': x}, outputs={'Y': out}, attrs={'axis': axis}) @@ -6756,7 +6771,7 @@ def unstack(x, axis=0, num=None): outs = [] for _ in range(num): - outs.append(helper.create_tmp_variable(x.dtype)) + outs.append(helper.create_variable_for_type_inference(x.dtype)) helper.append_op( type='unstack', @@ -6808,7 +6823,7 @@ def expand(x, expand_times, name=None): """ helper = LayerHelper('expand', input=x, **locals()) dtype = helper.input_dtype(input_param_name='x') - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) helper.append_op( type='expand', inputs={'X': x}, @@ -6847,7 +6862,7 @@ def uniform_random_batch_size_like(input, """ helper = LayerHelper('uniform_random_batch_size_like', **locals()) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) helper.append_op( type='uniform_random_batch_size_like', @@ -6884,7 +6899,7 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'): """ helper = LayerHelper('gaussian_random', **locals()) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) helper.append_op( type='gaussian_random', @@ -6919,7 +6934,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'): """ helper = LayerHelper('sampling_id', **locals()) - out = helper.create_tmp_variable(dtype) + out =
helper.create_variable_for_type_inference(dtype) helper.append_op( type='sampling_id', inputs={'X': x}, @@ -6958,7 +6973,7 @@ def gaussian_random_batch_size_like(input, """ helper = LayerHelper('gaussian_random_batch_size_like', **locals()) - out = helper.create_tmp_variable(dtype) + out = helper.create_variable_for_type_inference(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype) helper.append_op( type='gaussian_random_batch_size_like', @@ -6990,7 +7005,8 @@ def sum(x): """ helper = LayerHelper('sum', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype('x')) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype('x')) helper.append_op( type='sum', inputs={'X': x}, @@ -7017,7 +7033,8 @@ def slice(input, axes, starts, ends): """ helper = LayerHelper('slice', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype('input')) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype('input')) helper.append_op( type='slice', inputs={'Input': input}, @@ -7043,7 +7060,8 @@ def shape(input): """ helper = LayerHelper('shape', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype('input')) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype('input')) helper.append_op( type='shape', inputs={'Input': input}, outputs={'Out': out}) @@ -7060,7 +7078,7 @@ def _elementwise_op(helper): use_mkldnn = helper.kwargs.get('use_mkldnn', False) name = helper.kwargs.get('name', None) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7094,7 +7112,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): helper = LayerHelper('scale', **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7160,7 +7178,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if out is None: if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7268,7 +7286,7 @@ def clip(x, min, max, name=None): helper = LayerHelper("clip", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7300,7 +7318,7 @@ def clip_by_norm(x, max_norm, name=None): helper = LayerHelper("clip_by_norm", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7330,7 +7348,7 @@ def mean(x, name=None): helper = LayerHelper("mean", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7360,7 +7378,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): helper = LayerHelper("mul", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = 
helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7394,7 +7412,7 @@ def sigmoid_cross_entropy_with_logits(x, label, name=None): helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) @@ -7424,7 +7442,7 @@ def maxout(x, groups, name=None): helper = LayerHelper("maxout", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 9c6a2112a6..09a7cb8dc9 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -152,7 +152,7 @@ def cast(x, dtype): result = fluid.layers.cast(x=data, dtype='float64') """ helper = LayerHelper('cast', **locals()) - out = helper.create_tmp_variable(dtype=dtype) + out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='cast', inputs={'X': [x]}, @@ -184,7 +184,7 @@ def concat(input, axis=0, name=None): out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) """ helper = LayerHelper('concat', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( type='concat', inputs={'X': input}, @@ -221,7 +221,8 @@ def sums(input, out=None): """ helper = LayerHelper('sum', **locals()) if out is None: - out = helper.create_tmp_variable(dtype=helper.input_dtype()) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) helper.append_op( type='sum', inputs={'X': input}, @@ -252,7 +253,7 @@ def assign(input, output=None): """ helper = LayerHelper('assign', **locals()) if output is None: - output = helper.create_tmp_variable(dtype=input.dtype) + output = helper.create_variable_for_type_inference(dtype=input.dtype) if isinstance(input, Variable): helper.append_op( type='assign', inputs={'X': [input]}, outputs={'Out': [output]}) @@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): helper = LayerHelper("fill_constant", **locals()) if out is None: - out = helper.create_tmp_variable(dtype=dtype) + out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='fill_constant', inputs={}, @@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input, ${out_comment}. 
""" helper = LayerHelper("fill_constant_batch_size_like", **locals()) - out = helper.create_tmp_variable(dtype=dtype) + out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='fill_constant_batch_size_like', inputs={'Input': input}, @@ -396,7 +397,7 @@ def argmin(x, axis=0): out = fluid.layers.argmin(x=in, axis=-1) """ helper = LayerHelper("arg_min", **locals()) - out = helper.create_tmp_variable(VarDesc.VarType.INT64) + out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='arg_min', inputs={'X': x}, @@ -427,7 +428,7 @@ def argmax(x, axis=0): out = fluid.layers.argmax(x=in, axis=-1) """ helper = LayerHelper("arg_max", **locals()) - out = helper.create_tmp_variable(VarDesc.VarType.INT64) + out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='arg_max', inputs={'X': x}, @@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None): out, indices = fluid.layers.argsort(input, axis=0) """ helper = LayerHelper("argsort", **locals()) - out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True) - ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True) + out = helper.create_variable_for_type_inference( + dtype=input.dtype, stop_gradient=True) + ids = helper.create_variable_for_type_inference( + VarDesc.VarType.INT64, stop_gradient=True) helper.append_op( type='argsort', inputs={'X': input}, @@ -562,7 +565,7 @@ def reverse(x, axis): if isinstance(axis, int): axis = [axis] helper = LayerHelper("reverse", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='reverse', inputs={'Input': x}, @@ -654,7 +657,7 @@ def has_inf(x): Variable: The tensor variable storing the output, only a bool value. """ helper = LayerHelper("isinf", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out}) return out @@ -670,7 +673,7 @@ def has_nan(x): Variable: The tensor variable storing the output, only a bool value. """ helper = LayerHelper("isnan", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out}) return out @@ -687,6 +690,6 @@ def isfinite(x): Variable: The tensor variable storing the output, contains a bool value. 
""" helper = LayerHelper("isfinite", **locals()) - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out}) return out diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py index 97644df007..c151fbd172 100644 --- a/python/paddle/fluid/regularizer.py +++ b/python/paddle/fluid/regularizer.py @@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer): decay = block.create_var( dtype="float32", shape=param.shape, - type=core.VarDesc.VarType.SELECTED_ROWS) + type=core.VarDesc.VarType.LOD_TENSOR) block.append_op( type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) block.append_op( @@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer): decay = block.create_var( dtype="float32", shape=param.shape, - type=core.VarDesc.VarType.SELECTED_ROWS) + type=core.VarDesc.VarType.LOD_TENSOR) block.append_op( type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) block.append_op( diff --git a/python/paddle/fluid/tests/unittests/test_slice_var.py b/python/paddle/fluid/tests/unittests/test_slice_var.py index fab63b7d56..b16c744603 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_var.py +++ b/python/paddle/fluid/tests/unittests/test_slice_var.py @@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase): var = program.global_block().create_var( name=str(random.randint(10000, 99999)), persistable=True, - # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) blocks = slice_variable(var_list, 10, min_size) From c1383744f03da181749c0f590f2b6fcd9c4bc820 Mon Sep 17 00:00:00 2001 From: Xin Pan Date: Mon, 22 Oct 2018 22:11:09 +0800 Subject: [PATCH 53/53] resolve conflicts test=develop --- python/paddle/fluid/layers/nn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d8e497731a..cca618b9ad 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5478,7 +5478,7 @@ def roi_align(input, """ helper = LayerHelper('roi_align', **locals()) dtype = helper.input_dtype() - align_out = helper.create_tmp_variable(dtype) + align_out = helper.create_variable_for_type_inference(dtype) helper.append_op( type="roi_align", inputs={"X": input, @@ -7481,7 +7481,7 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None): helper = LayerHelper("affine_channel", **locals()) if name is None: - out = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False)