commit 74a8e6b032
File diff suppressed because it is too large
@@ -0,0 +1,25 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains the list of ngraph operators for Paddle.
 *
 * ATTENTION: It requires some C++11 features; for lower-version C++ or C we
 * might release another API.
 */

#pragma once

#include "ops/binary_unnary_op.h"
#include "ops/mul_op.h"
@@ -0,0 +1,52 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_NGRAPH
#pragma once

#include <memory>
#include <string>
#include <unordered_map>

#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"

namespace paddle {
namespace operators {
namespace ngraphs {

// Builds an ngraph node of type T from two inputs (X, Y), writing Out.
template <typename T>
static void BuildBinaryNode(
    const std::shared_ptr<paddle::framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
  auto out = std::make_shared<T>(x, y);
  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}

// Builds an ngraph node of type T from a single input (X), writing Out.
template <typename T>
static void BuildUnaryNode(
    const std::shared_ptr<paddle::framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto input = paddle::platform::GetInputNode(op, "X", ngb_node_map);
  auto out = std::make_shared<T>(input);
  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}

}  // namespace ngraphs
}  // namespace operators
}  // namespace paddle
#endif
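Editor's note: the two builders above are templates over an ngraph node type, so a single definition serves every one-input or two-input elementwise op. A hedged sketch of how they might be instantiated follows; the op-name-to-builder map is an assumption for illustration (the actual dispatch machinery is not part of this commit), and it presumes the header above and its includes are in scope.

// Hypothetical dispatch table; only the template instantiations
// (BuildBinaryNode<ngraph::op::Add>, BuildUnaryNode<ngraph::op::Relu>)
// follow directly from the header above.
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

using NgBuilder = std::function<void(
    const std::shared_ptr<paddle::framework::OperatorBase>&,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>)>;

const std::unordered_map<std::string, NgBuilder> kNgraphBuilders = {
    {"elementwise_add",
     paddle::operators::ngraphs::BuildBinaryNode<ngraph::op::Add>},
    {"relu", paddle::operators::ngraphs::BuildUnaryNode<ngraph::op::Relu>},
};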
@@ -0,0 +1,134 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_NGRAPH
#pragma once

#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>

#include "ngraph/ngraph.hpp"
#include "paddle/fluid/platform/ngraph_helper.h"

namespace paddle {
namespace operators {
namespace ngraphs {

static void BuildMulNode(
    const std::shared_ptr<paddle::framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
  int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
  int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);

  auto x_reshape = x;
  auto y_reshape = y;

  // Flatten inputs with more than two dimensions to 2-D before the dot.
  if (x->get_shape().size() > 2) {
    auto x_2d = paddle::platform::FlattenTo2d(x->get_shape(), x_num_col_dims);
    x_reshape = paddle::platform::NgReshaper(x, x_2d);
  }

  if (y->get_shape().size() > 2) {
    auto y_2d = paddle::platform::FlattenTo2d(y->get_shape(), y_num_col_dims);
    y_reshape = paddle::platform::NgReshaper(y, y_2d);
  }

  std::shared_ptr<ngraph::Node> out =
      std::make_shared<ngraph::op::Dot>(x_reshape, y_reshape);

  // Restore the expected output shape if the 2-D dot result differs.
  auto dummy_out = paddle::platform::GetOutputNode(op, "Out", ngb_node_map);
  if (dummy_out && dummy_out->get_shape() != out->get_shape()) {
    out = paddle::platform::NgReshaper(out, dummy_out->get_shape());
  }
  paddle::platform::SetOutputNode(op, "Out", out, ngb_node_map);
}

static void BuildMulGradNode(
    const std::shared_ptr<paddle::framework::OperatorBase>& op,
    std::shared_ptr<
        std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
        ngb_node_map) {
  auto op_attrs = paddle::framework::AttrReader(op->Attrs());
  int x_num_col_dims = op_attrs.Get<int>("x_num_col_dims");
  int y_num_col_dims = op_attrs.Get<int>("y_num_col_dims");
  auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
  auto y = paddle::platform::GetInputNode(op, "Y", ngb_node_map);
  auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);

  bool is_dx = paddle::platform::HasOutput(op, "X@GRAD");
  bool is_dy = paddle::platform::HasOutput(op, "Y@GRAD");

  auto x_shape = x->get_shape();
  auto y_shape = y->get_shape();

  auto x_reshape = x;
  auto y_reshape = y;

  if (x_shape.size() > 2) {
    auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, x_num_col_dims);
    x_reshape = paddle::platform::NgReshaper(x, x_2d_shape);
  }

  if (y_shape.size() > 2) {
    auto y_2d_shape = paddle::platform::FlattenTo2d(y_shape, y_num_col_dims);
    y_reshape = paddle::platform::NgReshaper(y, y_2d_shape);
  }

  // 2-D transposes of the flattened inputs, expressed as ngraph reshapes.
  auto x_reshape_shape = x_reshape->get_shape();
  std::reverse(x_reshape_shape.begin(), x_reshape_shape.end());
  auto x_transpose = std::make_shared<ngraph::op::Reshape>(
      x_reshape, ngraph::AxisVector{1, 0}, x_reshape_shape);

  auto y_reshape_shape = y_reshape->get_shape();
  std::reverse(y_reshape_shape.begin(), y_reshape_shape.end());
  auto y_transpose = std::make_shared<ngraph::op::Reshape>(
      y_reshape, ngraph::AxisVector{1, 0}, y_reshape_shape);

  // dX = dOut x Y^T, reshaped back to the original shape of X if needed.
  if (is_dx) {
    if (dout->get_shape().size() > 2) {
      auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
      dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
    }
    auto dx = std::make_shared<ngraph::op::Dot>(dout, y_transpose);

    if (dx->get_shape() == x_shape) {
      paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
    } else {
      auto dx_reshape = paddle::platform::NgReshaper(dx, x_shape);
      paddle::platform::SetOutputNode(op, "X@GRAD", dx_reshape, ngb_node_map);
    }
  }

  // dY = X^T x dOut, reshaped back to the original shape of Y if needed.
  if (is_dy) {
    if (dout->get_shape().size() > 2) {
      auto dout_2d_shape = paddle::platform::FlattenTo2d(dout->get_shape(), 2);
      dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
    }
    auto dy = std::make_shared<ngraph::op::Dot>(x_transpose, dout);

    if (dy->get_shape() == y_shape) {
      paddle::platform::SetOutputNode(op, "Y@GRAD", dy, ngb_node_map);
    } else {
      auto dy_reshape = paddle::platform::NgReshaper(dy, y_shape);
      paddle::platform::SetOutputNode(op, "Y@GRAD", dy_reshape, ngb_node_map);
    }
  }
}
}  // namespace ngraphs
}  // namespace operators
}  // namespace paddle
#endif
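Editor's note: both builders above reduce Paddle's mul to a 2-D ngraph::op::Dot, forward on flattened X and Y, backward as dX = dOut x Y^T and dY = X^T x dOut. The key step is collapsing an N-D shape to 2-D at num_col_dims. Below is a standalone sketch of that rule; its semantics are inferred from how FlattenTo2d is used here (the helper itself is not shown in this diff), and the function name is hypothetical.

#include <cstddef>
#include <iostream>
#include <vector>

// Assumed semantics of paddle::platform::FlattenTo2d: dims [0, num_col_dims)
// multiply into the row count, the remaining dims into the column count.
std::vector<size_t> FlattenTo2dSketch(const std::vector<size_t>& shape,
                                      size_t num_col_dims) {
  size_t rows = 1, cols = 1;
  for (size_t i = 0; i < shape.size(); ++i) {
    (i < num_col_dims ? rows : cols) *= shape[i];
  }
  return {rows, cols};
}

int main() {
  // A [2, 3, 4, 5] input with x_num_col_dims = 2 flattens to [6, 20],
  // which is what BuildMulNode feeds into ngraph::op::Dot.
  auto d = FlattenTo2dSketch({2, 3, 4, 5}, 2);
  std::cout << d[0] << " x " << d[1] << '\n';  // prints "6 x 20"
  return 0;
}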
@@ -0,0 +1,124 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using framework::DataLayout;

template <typename T>
class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
                   "It must use CPUPlace.");
    const bool is_test = ctx.Attr<bool>("is_test");
    PADDLE_ENFORCE(
        is_test == true,
        "TransposeMKLDNN works only for inference. Set is_test = True.");
    auto& dev_ctx =
        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
    const auto& mkldnn_engine = dev_ctx.GetEngine();
    std::vector<int> axis = ctx.Attr<std::vector<int>>("axis");
    int ndims = axis.size();
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    const T* input_data = input->data<T>();

    // A 1-D transpose is the identity; just alias the input buffer.
    if (ndims == 1) {
      output->ShareDataWith(*input);
      return;
    }

    std::vector<int> nchw_axis(ndims, 0);
    for (size_t i = 0; i < nchw_axis.size(); ++i) {
      nchw_axis[i] = i;
    }

    std::vector<int> nchw_tz = paddle::framework::vectorize2int(input->dims());
    std::string data_format = ctx.Attr<std::string>("data_format");

    auto src_md =
        input->format() != mkldnn::memory::format::nchw
            ? platform::MKLDNNMemDesc(nchw_tz, platform::MKLDNNGetDataType<T>(),
                                      input->format())
            : Axis2MemoryDesc(nchw_tz, nchw_axis);

    this->TransposeKernel(ctx.GetPlace(), Axis2MemoryDesc(nchw_tz, axis),
                          src_md, output, input_data, nchw_tz, mkldnn_engine);
  }

 protected:
  // Describes a transposed view of an nchw buffer purely through strides,
  // so that a reorder between the two descriptors performs the transpose.
  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int>& nchw_tz,
                                       std::vector<int>& axis) const {
    mkldnn_memory_desc_t mem_fmt;

    mem_fmt.primitive_kind = mkldnn_memory;
    mem_fmt.ndims = axis.size();
    for (unsigned int i = 0; i < nchw_tz.size(); ++i) {
      mem_fmt.dims[i] = nchw_tz[i];  // logical dimensions (nchw format,
                                     // regardless of physical layout)
    }
    mem_fmt.data_type = mkldnn_f32;
    mem_fmt.format = mkldnn_blocked;

    unsigned int total_stride = 1;
    for (int i = nchw_tz.size() - 1; i >= 0; --i) {
      mem_fmt.layout_desc.blocking.padding_dims[i] =
          nchw_tz[i];  // logical dimensions (nchw format, regardless of
                       // physical layout)
      mem_fmt.layout_desc.blocking.block_dims[i] = 1;
      mem_fmt.layout_desc.blocking.offset_padding_to_data[i] = 0;  // no offset
      mem_fmt.layout_desc.blocking.strides[0][axis[i]] = total_stride;
      mem_fmt.layout_desc.blocking.strides[1][axis[i]] = 1;
      total_stride *= nchw_tz[axis[i]];
    }
    mem_fmt.layout_desc.blocking.offset_padding = 0;  // no initial offset
    return mem_fmt;
  }

  void TransposeKernel(platform::Place place, mkldnn::memory::desc md_o,
                       mkldnn::memory::desc md_i, Tensor* output,
                       const T* data_i, std::vector<int>& nchw_dims,
                       const mkldnn::engine& eng) const {
    // Make memory primitive descriptors.
    auto mpd_o = mkldnn::memory::primitive_desc(md_o, eng);
    auto mpd_i = mkldnn::memory::primitive_desc(md_i, eng);

    auto data_o = output->mutable_data<T>(
        place, paddle::memory::Allocator::kDefault, mpd_o.get_size());

    auto src = mkldnn::memory(mpd_i, const_cast<T*>(data_i));
    auto dst = mkldnn::memory(mpd_o, data_o);

    // A reorder between the strided descriptors materializes the transpose.
    auto r = mkldnn::reorder(src, dst);
    mkldnn::stream(mkldnn::stream::kind::eager).submit({r}).wait();
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_KERNEL(transpose2, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::TransposeMKLDNNOpKernel<float>);
REGISTER_OP_KERNEL(transpose, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::TransposeMKLDNNOpKernel<float>);
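Editor's note: the stride loop in Axis2MemoryDesc is the heart of this kernel. Walking the logical dims from innermost to outermost and assigning the running stride to slot axis[i] makes the destination descriptor a transposed view, so a plain reorder copies the data into transposed order. A minimal standalone sketch of that same loop, with a hypothetical helper name and no mkldnn dependency:

#include <iostream>
#include <vector>

// Sketch of the stride rule used by Axis2MemoryDesc above: iterate logical
// dims from last to first, giving the slot named by axis[i] the running
// stride, then growing the stride by dims[axis[i]].
std::vector<int> TransposeStrides(const std::vector<int>& dims,
                                  const std::vector<int>& axis) {
  std::vector<int> strides(dims.size(), 0);
  int total_stride = 1;
  for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
    strides[axis[i]] = total_stride;
    total_stride *= dims[axis[i]];
  }
  return strides;
}

int main() {
  // For a [2, 3, 4] tensor and axis = {0, 2, 1}, the logical dims keep
  // their order but the strides become {12, 1, 3}: a transposed view.
  for (int s : TransposeStrides({2, 3, 4}, {0, 2, 1})) std::cout << s << ' ';
  std::cout << '\n';
  return 0;
}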
Some files were not shown because too many files have changed in this diff