commit 8d88c52d8a
(Binary image file changed; 24 KiB before and after. Content not shown.)
@@ -0,0 +1,109 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/elementwise_mul_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

class ElementWiseMulOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
    auto x_dim = ctx.Input<Tensor>("X")->dims();
    auto y_dim = ctx.Input<Tensor>("Y")->dims();
    PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(),
                      "Rank of first input must >= rank of second input.")
    ctx.Output<Tensor>("Out")->Resize(x_dim);
  }
};

class ElementWiseMulOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  ElementWiseMulOpMaker(framework::OpProto *proto,
                        framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "The first input of elementwise mul op");
    AddInput("Y", "The second input of elementwise mul op");
    AddAttr<int>("axis",
                 R"DOC(
When shape(Y) does not equal shape(X), Y will be broadcast to match
the shape of X; axis is the dimension index in X at which Y is aligned.
)DOC")
        .SetDefault(-1)
        .EqualGreaterThan(-1);

    AddOutput("Out", "The output of elementwise mul op");
    AddComment(R"DOC(
Limited elementwise multiply operator. The equation is: Out = X ⊙ Y.
1. The shape of Y should be the same as X, or
2. Y's shape is a subset of X's shape.
   In that case Y will be broadcast to match the shape of X, and axis is
   the dimension index in X at which Y is aligned.

Examples:
  shape(X) = (2, 3, 4, 5), shape(Y) = (,)
  shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
  shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
  shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
  shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
)DOC");
  }
};

class ElementWiseMulOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) should not be null");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                            "Input(Out@GRAD) should not be null");

    auto x_dims = ctx.Input<Tensor>("X")->dims();
    auto y_dims = ctx.Input<Tensor>("Y")->dims();
    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));

    PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
                      "Rank of first input must >= rank of second input.")

    if (x_grad) {
      x_grad->Resize(x_dims);
    }

    if (y_grad) {
      y_grad->Resize(y_dims);
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(elementwise_mul, ops::ElementWiseMulOp, ops::ElementWiseMulOpMaker,
            elementwise_mul_grad, ops::ElementWiseMulOpGrad);
REGISTER_OP_CPU_KERNEL(
    elementwise_mul,
    ops::ElementWiseMulKernel<paddle::platform::CPUPlace, float>);
REGISTER_OP_CPU_KERNEL(
    elementwise_mul_grad,
    ops::ElementWiseMulGradKernel<paddle::platform::CPUPlace, float>);
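For readers unfamiliar with the broadcasting rule described in the DOC string above, the NumPy sketch below models the same semantics. It is illustrative only and not part of the commit: the helper name `elementwise_mul_np` is made up, and it assumes the default `axis = -1` aligns Y with the trailing dimensions of X, as the operator's kernel does.

```python
import numpy as np

def elementwise_mul_np(x, y, axis=-1):
    # Align Y with the trailing dimensions of X when axis is -1,
    # mirroring the operator's default behaviour.
    if axis == -1:
        axis = x.ndim - y.ndim
    assert 0 <= axis <= x.ndim - y.ndim, "axis out of range"
    assert x.shape[axis:axis + y.ndim] == y.shape, "broadcast dimension mismatch"
    # Pad Y's shape with singleton dimensions before and after, so that
    # plain NumPy broadcasting reproduces the kernel's expansion of Y.
    padded = (1,) * axis + y.shape + (1,) * (x.ndim - axis - y.ndim)
    return x * y.reshape(padded)

# Shapes from the examples in the DOC string:
x = np.random.rand(2, 3, 4, 5)
print(elementwise_mul_np(x, np.random.rand(5)).shape)             # (2, 3, 4, 5)
print(elementwise_mul_np(x, np.random.rand(4, 5)).shape)          # (2, 3, 4, 5)
print(elementwise_mul_np(x, np.random.rand(3, 4), axis=1).shape)  # (2, 3, 4, 5)
print(elementwise_mul_np(x, np.random.rand(2), axis=0).shape)     # (2, 3, 4, 5)
```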
@@ -0,0 +1,25 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/operators/elementwise_mul_op.h"

namespace ops = paddle::operators;

REGISTER_OP_GPU_KERNEL(
    elementwise_mul,
    ops::ElementWiseMulKernel<paddle::platform::GPUPlace, float>);
REGISTER_OP_GPU_KERNEL(
    elementwise_mul_grad,
    ops::ElementWiseMulGradKernel<paddle::platform::GPUPlace, float>);
@@ -0,0 +1,185 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <iostream>
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/math_function.h"

namespace paddle {
namespace operators {
/*
 * Out = X ⊙ Y
 * 1. shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
 *    pre=2, n=3*4, post=5
 * 2. shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
 *    pre=2*3, n=4*5, post=1
 */

inline void get_mid_dims(const framework::DDim& x_dims,
                         const framework::DDim& y_dims, const int axis,
                         int& pre, int& n, int& post) {
  pre = 1;
  n = 1;
  post = 1;
  for (int i = 0; i < axis; ++i) {
    pre *= x_dims[i];
  }

  for (int i = 0; i < y_dims.size(); ++i) {
    PADDLE_ENFORCE_EQ(x_dims[i + axis], y_dims[i],
                      "Broadcast dimension mismatch.");
    n *= y_dims[i];
  }

  for (int i = axis + y_dims.size(); i < x_dims.size(); ++i) {
    post *= x_dims[i];
  }
}

template <typename Place, typename T>
class ElementWiseMulKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    using Tensor = framework::Tensor;

    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");
    auto* z = ctx.Output<Tensor>("Out");
    z->mutable_data<T>(ctx.GetPlace());

    auto x_e = framework::EigenVector<T>::Flatten(*x);
    auto y_e = framework::EigenVector<T>::Flatten(*y);
    auto z_e = framework::EigenVector<T>::Flatten(*z);

    auto x_dims = x->dims();
    auto y_dims = y->dims();
    PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
                      "Rank of first input must >= rank of second input.")

    if (x_dims == y_dims || product(y_dims) == 1) {
      z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_e;
      return;
    }

    int axis = ctx.Attr<int>("axis");
    axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
    PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
                   "Axis should be in range [0, x_dims)");

    int pre, n, post;
    get_mid_dims(x_dims, y_dims, axis, pre, n, post);
    if (post == 1) {
      auto y_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
                         .broadcast(Eigen::DSizes<int, 2>(pre, 1))
                         .reshape(Eigen::DSizes<int, 1>(x_e.size()));
      z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
      return;
    } else {
      auto y_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
                         .broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
                         .reshape(Eigen::DSizes<int, 1>(x_e.size()));
      z_e.device(ctx.GetEigenDevice<Place>()) = x_e * y_bcast;
      return;
    }
  }
};

template <typename Place, typename T>
class ElementWiseMulGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    using Tensor = framework::Tensor;

    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));

    auto x_e = framework::EigenVector<T>::Flatten(*x);
    auto y_e = framework::EigenVector<T>::Flatten(*y);
    auto dout_e = framework::EigenVector<T>::Flatten(*dout);

    auto x_dims = x->dims();
    auto y_dims = y->dims();

    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
    if (dx) {
      dx->mutable_data<T>(ctx.GetPlace());
    }
    if (dy) {
      dy->mutable_data<T>(ctx.GetPlace());
    }

    if (x_dims == y_dims || product(y_dims) == 1) {
      if (dx) {
        auto dx_e = framework::EigenVector<T>::Flatten(*dx);
        dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e;
      }

      if (dy) {
        auto dy_e = framework::EigenVector<T>::Flatten(*dy);
        dy_e.device(ctx.GetEigenDevice<Place>()) = x_e * dout_e;
      }
      return;
    }

    int axis = ctx.Attr<int>("axis");
    axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);

    int pre, n, post;
    get_mid_dims(x_dims, y_dims, axis, pre, n, post);

    // TODO(gongweibao): wrap reshape to a function.
    if (post == 1) {
      auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 2>(1, n))
                           .broadcast(Eigen::DSizes<int, 2>(pre, 1))
                           .reshape(Eigen::DSizes<int, 1>(x_e.size()));
      if (dx) {
        auto dx_e = framework::EigenVector<T>::Flatten(*dx);
        dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
      }

      if (dy) {
        auto dy_e = framework::EigenVector<T>::Flatten(*dy);
        dy_e.device(ctx.GetEigenDevice<Place>()) =
            (x_e * dout_e)
                .reshape(Eigen::DSizes<int, 2>(pre, n))
                .sum(Eigen::array<int, 1>{{0}});
      }
      return;
    } else {
      auto y_e_bcast = y_e.reshape(Eigen::DSizes<int, 3>(1, n, 1))
                           .broadcast(Eigen::DSizes<int, 3>(pre, 1, post))
                           .reshape(Eigen::DSizes<int, 1>(x_e.size()));
      if (dx) {
        auto dx_e = framework::EigenVector<T>::Flatten(*dx);
        dx_e.device(ctx.GetEigenDevice<Place>()) = dout_e * y_e_bcast;
      }

      if (dy) {
        auto dy_e = framework::EigenVector<T>::Flatten(*dy);
        dy_e.device(ctx.GetEigenDevice<Place>()) =
            (x_e * dout_e)
                .reshape(Eigen::DSizes<int, 3>(pre, n, post))
                .sum(Eigen::array<int, 2>{{0, 2}});
      }
      return;
    }
  }
};

}  // namespace operators
}  // namespace paddle
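The `get_mid_dims` decomposition and the gradient reductions above can be summarized with a small NumPy sketch. This is a hedged illustration, not code from the commit; the helper names `mid_dims` and `elementwise_mul_grad_np` are invented for the example.

```python
import numpy as np

def mid_dims(x_shape, y_shape, axis):
    """Mirror of get_mid_dims: split X's shape into (pre, n, post) around Y."""
    pre = int(np.prod(x_shape[:axis], dtype=int))
    n = int(np.prod(y_shape, dtype=int))
    post = int(np.prod(x_shape[axis + len(y_shape):], dtype=int))
    return pre, n, post

def elementwise_mul_grad_np(x, y, dout, axis):
    """dX = dOut * broadcast(Y); dY = sum of X * dOut over the broadcast axes."""
    pre, n, post = mid_dims(x.shape, y.shape, axis)
    y_bcast = np.broadcast_to(y.reshape(1, n, 1), (pre, n, post)).reshape(x.shape)
    dx = dout * y_bcast
    # The Y gradient collapses the pre and post axes, like the Eigen
    # .sum(Eigen::array<int, 2>{{0, 2}}) reduction in the kernel above.
    dy = (x * dout).reshape(pre, n, post).sum(axis=(0, 2)).reshape(y.shape)
    return dx, dy

# shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), axis = 1  ->  pre=2, n=12, post=5
x, y = np.random.rand(2, 3, 4, 5), np.random.rand(3, 4)
dout = np.ones_like(x)
dx, dy = elementwise_mul_grad_np(x, y, dout, axis=1)
print(dx.shape, dy.shape)  # (2, 3, 4, 5) (3, 4)
```

The reduction over the pre and post axes is what keeps dY in Y's shape even though Y is expanded to X's shape in the forward pass.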
@@ -0,0 +1,59 @@
## Operator's Parameter Name Convention

To make operator documentation clearer, we recommend that operator names obey the conventions listed below.

### OpProtoMaker names

When defining an operator in Paddle, a corresponding [OpProtoMaker](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/operator.h#L170) (TODO: OpProtoMaker Doc) needs to be defined. All of the Inputs/Outputs and Attributes will be written into the [OpProto](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/framework.proto#L61) and will be used in the client language to create the operator.

- Input/Output.
  - Input/Output names follow **CamelCase**, e.g. `X`, `Y`, `Matrix`, `LastAxisInMatrix`. Inputs and Outputs behave much like variables, so we prefer meaningful English words.
  - If an operator's Inputs/Outputs are mathematical tensors that do not correspond to any meaningful word, input names should start from `X` (e.g. `X`, `Y`) and output names should start from `Out` (e.g. `Out`). This rule is intended to unify operators that have few inputs and outputs.

- Attribute.
  - Attribute names follow **camelCase**, e.g. `x`, `y`, `axis`, `rowwiseMatrix`. Again, prefer meaningful English words.

- Comments.
  - Input/Output/Attr comments follow the format **(type, default value) usage**, describing which type the item can be and how it is used in the operator, e.g. the `"gamma"` attribute in Accumulate: `(float, default 1.0) Accumulation multiplier`.
  - Operator comments use the format `R"DOC(your comment here)DOC"`. Explain the operator's inputs and outputs first. If the operator involves a mathematical computation, write the equation in the comment, e.g. `Out = X + Y`.

- Order.
  - Follow the order Input/Output, then Attribute, then Comments. See the example in the best practice below.

### Best Practice

Here we give some examples to show how these rules are applied.

- The operator has one input, one output, e.g. `relu`; inputs: `X`, outputs: `Out`.

- The operator has two inputs, one output, e.g. `rowwise_add`; inputs: `X`, `Y`, outputs: `Out`.

- The operator contains an attribute, e.g. `cosine`; inputs: `X`, `axis`, outputs: `Out`.

A full example, the Accumulate operator:

```c++
class AccumulateOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  AccumulateOpMaker(framework::OpProto *proto,
                    framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
             "(Tensor) The input tensor that has to be accumulated to the "
             "output tensor. If the output size is not the same as the input "
             "size, the output tensor is first reshaped and initialized to "
             "zero, and only then accumulation is done.");
    AddOutput("Out", "(Tensor) Accumulated output tensor");
    AddAttr<float>("gamma", "(float, default 1.0) Accumulation multiplier");
    AddComment(R"DOC(
Accumulate operator accumulates the input tensor to the output tensor. If the
output tensor already has the right size, we add to it; otherwise, we first
initialize the output tensor to all zeros, and then do accumulation. Any
further calls to the operator, given that no one else fiddles with the output
in the interim, will do simple accumulations.
Accumulation is done as shown:

Out = 1*X + gamma*Out

where X is the input tensor, Out is the output tensor and gamma is the
multiplier argument.
)DOC");
  }
};
```
@@ -1,38 +1,5 @@
-py_test(test_net SRCS test_net.py)
-
-py_test(test_scope SRCS test_scope.py)
-
-py_test(test_tensor SRCS test_tensor.py)
-py_test(test_mul_op SRCS test_mul_op.py)
-py_test(test_cos_sim_op SRCS test_cos_sim_op.py)
-
-py_test(test_mean_op SRCS test_mean_op.py)
-
-py_test(test_protobuf SRCS test_protobuf.py)
-
-py_test(test_add_two_op SRCS test_add_two_op.py)
-py_test(test_sigmoid_op SRCS test_sigmoid_op.py)
-py_test(test_softmax_op SRCS test_softmax_op.py)
-py_test(test_cross_entropy_op SRCS test_cross_entropy_op.py)
-py_test(test_gather_op SRCS test_gather_op.py)
-py_test(test_scatter_op SRCS test_scatter_op.py)
-py_test(test_fill_zeros_like_op SRCS test_fill_zeros_like_op.py)
-py_test(test_top_k_op SRCS test_top_k_op.py)
-
-py_test(test_rowwise_add_op SRCS test_rowwise_add_op.py)
-
-py_test(test_default_scope_funcs SRCS test_default_scope_funcs.py)
-
-py_test(test_operator SRCS test_operator.py)
-py_test(test_gaussian_random_op SRCS test_gaussian_random_op.py)
-py_test(test_uniform_random_op SRCS test_uniform_random_op.py)
-py_test(test_recurrent_op SRCS test_recurrent_op.py)
-py_test(test_sgd_op SRCS test_sgd_op.py)
-py_test(test_gradient_checker SRCS test_gradient_checker.py)
-py_test(test_lookup_table SRCS test_lookup_table.py)
-py_test(test_scale_and_identity_op SRCS test_scale_and_identity_op.py)
-py_test(test_sum_op SRCS test_sum_op.py)
-py_test(mnist SRCS mnist.py)
-py_test(test_concat_op SRCS test_concat_op.py)
-py_test(test_squared_l2_distance_op SRCS test_squared_l2_distance_op.py)
-py_test(test_reshape_op SRCS test_reshape_op.py)
+file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
+string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+foreach(src ${TEST_OPS})
+  py_test(${src} SRCS ${src}.py)
+endforeach()
@@ -0,0 +1,157 @@
import unittest
import numpy as np
from op_test import OpTest


class TestElementwiseMulOp_Matrix(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        """ Warning
        CPU gradient check error!
        'X': np.random.random((32,84)).astype("float32"),
        'Y': np.random.random((32,84)).astype("float32")
        """
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"),
            'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32")
        }
        self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))


class TestElementwiseMulOp_Vector(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.random((32, )).astype("float32"),
            'Y': np.random.random((32, )).astype("float32")
        }
        self.outputs = {'Out': np.multiply(self.inputs['X'], self.inputs['Y'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))


class TestElementwiseMulOp_broadcast_0(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(2).astype(np.float32)
        }

        self.attrs = {'axis': 0}
        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(2, 1, 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))


class TestElementwiseMulOp_broadcast_1(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(3).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 1)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))


class TestElementwiseMulOp_broadcast_2(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 4).astype(np.float32),
            'Y': np.random.rand(4).astype(np.float32)
        }

        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 4)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.1)

    def test_check_grad_ingore_x(self):
        self.check_grad(
            ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y'))


class TestElementwiseMulOp_broadcast_3(OpTest):
    def setUp(self):
        self.op_type = "elementwise_mul"
        self.inputs = {
            'X': np.random.rand(2, 3, 4, 5).astype(np.float32),
            'Y': np.random.rand(3, 4).astype(np.float32)
        }

        self.attrs = {'axis': 1}
        self.outputs = {
            'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 4, 1)
        }


if __name__ == '__main__':
    unittest.main()