parent 2d8467ee9d
commit f99841dd2a
@@ -0,0 +1,39 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_add_op.h"

namespace paddle {
namespace operators {
class ElementwiseAddOpMaker : public ElementwiseOpMaker {
 public:
  ElementwiseAddOpMaker(framework::OpProto* proto,
                        framework::OpAttrChecker* op_checker)
      : ElementwiseOpMaker(proto, op_checker) {
    SetComment("add", "Out = X + Y");
    AddComment(comment_);
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(elementwise_add, ops::ElementwiseOp, ops::ElementwiseAddOpMaker,
            elementwise_add_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
    elementwise_add,
    ops::ElementwiseAddKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    elementwise_add_grad,
    ops::ElementwiseAddGradKernel<paddle::platform::CPUPlace, float>);
@@ -0,0 +1,25 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_add_op.h"

namespace ops = paddle::operators;

REGISTER_OP_GPU_KERNEL(
    elementwise_add,
    ops::ElementwiseAddKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    elementwise_add_grad,
    ops::ElementwiseAddGradKernel<paddle::platform::GPUPlace, float>);
@@ -0,0 +1,113 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_op.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class ElementwiseAddKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseCompute<EigenAddFunctor, Place, T>(ctx);
  }
};

template <typename T>
struct ElementwiseAddGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }
    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = dz_e;
    }
  }
};

template <typename T>
struct ElementwiseAddOneGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }
    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = dz_e.sum();
    }
  }
};

template <typename T>
struct ElementwiseAddBroadCastGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = dz_e.reshape(Eigen::DSizes<int, 2>(pre, n))
                           .sum(Eigen::array<int, 1>{{0}});
    }
  }
};

template <typename T>
struct ElementwiseAddBroadCast2GradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N, typename Post>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
                  Post post) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = dz_e.reshape(Eigen::DSizes<int, 3>(pre, n, post))
                           .sum(Eigen::array<int, 2>{{0, 2}});
    }
  }
};

template <typename Place, typename T>
class ElementwiseAddGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseGradCompute<Place, T, ElementwiseAddGradFunctor<T>,
                           ElementwiseAddOneGradFunctor<T>,
                           ElementwiseAddBroadCastGradFunctor<T>,
                           ElementwiseAddBroadCast2GradFunctor<T>>(ctx);
  }
};

}  // namespace operators
}  // namespace paddle
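Note: all four add-gradient functors above encode dX = dZ, with dY equal to dZ reduced over whatever axes Y was broadcast along. A minimal NumPy sketch (not part of this commit) of the reshape-and-sum pattern used by ElementwiseAddBroadCast2GradFunctor:

import numpy as np

# Z = X + broadcast(Y): X has shape (pre, n, post), Y has shape (n,).
# dX is dZ unchanged; dY is dZ summed over the broadcast axes 0 and 2,
# mirroring dz_e.reshape(pre, n, post).sum({0, 2}) in the functor.
pre, n, post = 2, 3, 4
dz = np.random.rand(pre, n, post).astype(np.float32)

dx = dz
dy = dz.sum(axis=(0, 2))

assert dx.shape == (pre, n, post) and dy.shape == (n,)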
@@ -0,0 +1,40 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_div_op.h"

namespace paddle {
namespace operators {
class ElementwiseDivOpMaker : public ElementwiseOpMaker {
 public:
  ElementwiseDivOpMaker(framework::OpProto* proto,
                        framework::OpAttrChecker* op_checker)
      : ElementwiseOpMaker(proto, op_checker) {
    SetComment("Div", "Out = X / Y");
    AddComment(comment_);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(elementwise_div, ops::ElementwiseOp, ops::ElementwiseDivOpMaker,
            elementwise_div_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
    elementwise_div,
    ops::ElementwiseDivKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    elementwise_div_grad,
    ops::ElementwiseDivGradKernel<paddle::platform::CPUPlace, float>);
@@ -0,0 +1,25 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_div_op.h"

namespace ops = paddle::operators;

REGISTER_OP_GPU_KERNEL(
    elementwise_div,
    ops::ElementwiseDivKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    elementwise_div_grad,
    ops::ElementwiseDivGradKernel<paddle::platform::GPUPlace, float>);
@@ -0,0 +1,115 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_op.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class ElementwiseDivKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseCompute<EigenDivFunctor, Place, T>(ctx);
  }
};

template <typename T>
struct ElementwiseDivGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
    auto y_e = framework::EigenVector<T>::Flatten(*y);
    auto z_e = framework::EigenVector<T>::Flatten(*z);
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);

    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e / y_e;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = -1.0 * dz_e * z_e / y_e;
    }
  }
};

template <typename T>
struct ElementwiseDivBroadCastGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
    auto x_e = framework::EigenVector<T>::Flatten(*x);
    auto y_e = framework::EigenVector<T>::Flatten(*y);
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);

    auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
                         .broadcast(Eigen::DSizes<int, 2>(pre, 1))
                         .reshape(Eigen::DSizes<int, 1>(x_e.size()));

    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e / y_e_bcast;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0 * (x_e * dz_e) / (y_e_bcast * y_e_bcast))
                           .reshape(Eigen::DSizes<int, 2>(pre, n))
                           .sum(Eigen::array<int, 1>{{0}});
    }
  }
};

template <typename T>
struct ElementwiseDivBroadCast2GradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N, typename Post>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
                  Post post) {
    auto x_e = framework::EigenVector<T>::Flatten(*x);
    auto y_e = framework::EigenVector<T>::Flatten(*y);
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);

    auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
                         .broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
                         .reshape(Eigen::DSizes<int, 1>(x_e.size()));
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e / y_e_bcast;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0 * (x_e * dz_e) / (y_e_bcast * y_e_bcast))
                           .reshape(Eigen::DSizes<int, 3>(pre, n, post))
                           .sum(Eigen::array<int, 2>{{0, 2}});
    }
  }
};

template <typename Place, typename T>
class ElementwiseDivGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseGradCompute<Place, T, ElementwiseDivGradFunctor<T>,
                           ElementwiseDivGradFunctor<T>,
                           ElementwiseDivBroadCastGradFunctor<T>,
                           ElementwiseDivBroadCast2GradFunctor<T>>(ctx);
  }
};

}  // namespace operators
}  // namespace paddle
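Sanity check on the division gradient: with Z = X / Y, the chain rule gives dX = dZ / Y and dY = -dZ * X / Y^2; since Z = X / Y, the latter equals -dZ * Z / Y, which is the cheaper form ElementwiseDivGradFunctor evaluates. A small NumPy sketch (not from this commit) confirming the two forms agree:

import numpy as np

x = np.random.uniform(0.1, 1, (13, 17))
y = np.random.uniform(0.1, 1, (13, 17))
z = x / y
dz = np.random.rand(13, 17)

dy_via_z = -1.0 * dz * z / y         # form used in ElementwiseDivGradFunctor
dy_direct = -1.0 * dz * x / (y * y)  # textbook d(x/y)/dy = -x / y^2

assert np.allclose(dy_via_z, dy_direct)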
(File diff suppressed because it is too large.)
@@ -0,0 +1,39 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_sub_op.h"

namespace paddle {
namespace operators {
class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 public:
  ElementwiseSubOpMaker(framework::OpProto* proto,
                        framework::OpAttrChecker* op_checker)
      : ElementwiseOpMaker(proto, op_checker) {
    SetComment("Sub", "Out = X - Y");
    AddComment(comment_);
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(elementwise_sub, ops::ElementwiseOp, ops::ElementwiseSubOpMaker,
            elementwise_sub_grad, ops::ElementwiseOpGrad);
REGISTER_OP_CPU_KERNEL(
    elementwise_sub,
    ops::ElementwiseSubKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    elementwise_sub_grad,
    ops::ElementwiseSubGradKernel<paddle::platform::CPUPlace, float>);
@@ -0,0 +1,25 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_sub_op.h"

namespace ops = paddle::operators;

REGISTER_OP_GPU_KERNEL(
    elementwise_sub,
    ops::ElementwiseSubKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    elementwise_sub_grad,
    ops::ElementwiseSubGradKernel<paddle::platform::GPUPlace, float>);
@@ -0,0 +1,115 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_op.h"

namespace paddle {
namespace operators {

template <typename Place, typename T>
class ElementwiseSubKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseCompute<EigenSubFunctor, Place, T>(ctx);
  }
};

template <typename T>
struct ElementwiseSubGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }
    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0) * dz_e;
    }
  }
};

template <typename T>
struct ElementwiseSubOneGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }
    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0) * dz_e.sum();
    }
  }
};

template <typename T>
struct ElementwiseSubBroadCastGradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0) *
                       dz_e.reshape(Eigen::DSizes<int, 2>(pre, n))
                           .sum(Eigen::array<int, 1>{{0}});
    }
  }
};

template <typename T>
struct ElementwiseSubBroadCast2GradFunctor {
  template <typename Device, typename X, typename Y, typename Z, typename dX,
            typename dY, typename dZ, typename Pre, typename N, typename Post>
  void operator()(Device d, X x, Y y, Z z, dX dx, dY dy, dZ dz, Pre pre, N n,
                  Post post) {
    auto dz_e = framework::EigenVector<T>::Flatten(*dz);
    if (dx) {
      auto dx_e = framework::EigenVector<T>::Flatten(*dx);
      dx_e.device(d) = dz_e;
    }

    if (dy) {
      auto dy_e = framework::EigenVector<T>::Flatten(*dy);
      dy_e.device(d) = (-1.0) *
                       dz_e.reshape(Eigen::DSizes<int, 3>(pre, n, post))
                           .sum(Eigen::array<int, 2>{{0, 2}});
    }
  }
};

template <typename Place, typename T>
class ElementwiseSubGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    ElementwiseGradCompute<Place, T, ElementwiseSubGradFunctor<T>,
                           ElementwiseSubOneGradFunctor<T>,
                           ElementwiseSubBroadCastGradFunctor<T>,
                           ElementwiseSubBroadCast2GradFunctor<T>>(ctx);
  }
};

}  // namespace operators
}  // namespace paddle
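For Z = X - Y the functors above reduce to dX = dZ and dY = -dZ, summed over the broadcast axes when Y is smaller than X. A NumPy sketch (not from this commit) of the (pre, n) case handled by ElementwiseSubBroadCastGradFunctor:

import numpy as np

# X has shape (pre, n), Y has shape (n,); dY is -dZ summed over axis 0,
# mirroring (-1.0) * dz_e.reshape(pre, n).sum({0}) in the functor.
pre, n = 4, 3
dz = np.random.rand(pre, n).astype(np.float32)

dx = dz
dy = -dz.sum(axis=0)

assert dy.shape == (n,)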
@@ -0,0 +1,96 @@
import unittest
import numpy as np
from op_test import OpTest


class TestElementwiseOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        }
        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))


class TestElementwiseAddOp_Vector(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.random((32, )).astype("float32"),
            'Y': np.random.random((32, )).astype("float32")
        }
        self.outputs = {'Out': np.add(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseAddOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(2).astype(np.float32)
        }

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': self.inputs['X'] + self.inputs['Y'].reshape(2, 1, 1)
        }


class TestElementwiseAddOp_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(3).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 1)
        }


class TestElementwiseAddOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(4).astype(np.float32)
        }

        self.outputs = {
            'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 1, 4)
        }


class TestElementwiseAddOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_add"
        self.inputs = {
            'X': np.random.rand(2, 3, 4, 5).astype(np.float32),
            'Y': np.random.rand(3, 4).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] + self.inputs['Y'].reshape(1, 3, 4, 1)
        }


if __name__ == '__main__':
    unittest.main()
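All broadcast tests above follow one rule: Y's shape must match a contiguous run of X's dimensions starting at 'axis' (the trailing dimensions when no axis is given), and the expected output reshapes Y with 1s elsewhere so plain NumPy broadcasting applies. A hypothetical helper (for illustration only, not part of OpTest) making that reshape explicit:

import numpy as np

def aligned_shape(x_shape, y_shape, axis=None):
    # Pad y_shape with 1s so it lines up with x_shape at `axis`;
    # default is trailing alignment, as in broadcast_2 above.
    if axis is None:
        axis = len(x_shape) - len(y_shape)
    return (1,) * axis + tuple(y_shape) + \
        (1,) * (len(x_shape) - axis - len(y_shape))

x = np.random.rand(2, 3, 4, 5)
y = np.random.rand(3, 4)
# axis=1 -> (1, 3, 4, 1), matching TestElementwiseAddOp_broadcast_3
assert aligned_shape(x.shape, y.shape, axis=1) == (1, 3, 4, 1)
assert (x + y.reshape(aligned_shape(x.shape, y.shape, 1))).shape == x.shape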
@@ -0,0 +1,105 @@
import unittest
import numpy as np
from op_test import OpTest


class ElementwiseDivOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_div"
        """ Warning
        CPU gradient check error!
        'X': np.random.random((32,84)).astype("float32"),
        'Y': np.random.random((32,84)).astype("float32")
        """
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        }
        self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))


class TestElementwiseDivOp_Vector(ElementwiseDivOp):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [32]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [32]).astype("float32")
        }
        self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [2]).astype("float32")
        }

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out':
            np.divide(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1))
        }


class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [3]).astype("float32")
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out':
            np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1))
        }


class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [4]).astype("float32")
        }

        self.outputs = {
            'Out':
            np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4))
        }


class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp):
    def setUp(self):
        self.op_type = "elementwise_div"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float32")
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out':
            np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1))
        }


if __name__ == '__main__':
    unittest.main()
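The uniform(0.1, 1) inputs and the "CPU gradient check error!" warning above reflect the same issue: the division gradients contain 1/Y and 1/Y^2 terms, so a finite-difference check loses accuracy when Y gets close to zero. A rough central-difference check in the spirit of check_grad (a sketch only; OpTest's actual mechanism is not shown in this commit):

import numpy as np

def num_grad(f, v, eps=1e-3):
    # Central-difference gradient of a scalar-valued f at array v.
    g = np.zeros_like(v)
    it = np.nditer(v, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        orig = v[i]
        v[i] = orig + eps
        fp = f(v)
        v[i] = orig - eps
        fm = f(v)
        v[i] = orig
        g[i] = (fp - fm) / (2 * eps)
        it.iternext()
    return g

x = np.random.uniform(0.1, 1, (3, 4))
y = np.random.uniform(0.1, 1, (3, 4))  # bounded away from 0 on purpose
analytic = -x / (y * y)  # d(sum(x / y)) / dy
assert np.allclose(num_grad(lambda v: (x / v).sum(), y), analytic, rtol=0.05)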
@@ -0,0 +1,96 @@
import unittest
import numpy as np
from op_test import OpTest


class TestElementwiseOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))


class TestElementwiseSubOp_Vector(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.random((32, )).astype("float32"),
            'Y': np.random.random((32, )).astype("float32")
        }
        self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}


class TestElementwiseSubOp_broadcast_0(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(2).astype(np.float32)
        }

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(2, 1, 1)
        }


class TestElementwiseSubOp_broadcast_1(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(3).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 3, 1)
        }


class TestElementwiseSubOp_broadcast_2(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(4).astype(np.float32)
        }

        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 1, 4)
        }


class TestElementwiseSubOp_broadcast_3(TestElementwiseOp):
    def setUp(self):
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.rand(2, 3, 4, 5).astype(np.float32),
            'Y': np.random.rand(3, 4).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] - self.inputs['Y'].reshape(1, 3, 4, 1)
        }


if __name__ == '__main__':
    unittest.main()