【NPU】Support npu op elementwise_div and elementwise_div_grad (#31573)
* Support npu op elementwise_div and elementwise_div_grad
parent ec2160a622
commit de65486c19
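For context, a minimal sketch (not part of this commit) of how the new forward kernel is reached from Python. It assumes a Paddle build with NPU support and mirrors the static-graph pattern used in the test file below; the shapes and variable names are illustrative only.

import numpy as np
import paddle

paddle.enable_static()

# Build a tiny static program whose divide is lowered to elementwise_div;
# on an NPU place it dispatches to the kernel added in this commit.
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[4, 4], dtype="float32")
    y = paddle.static.data(name="y", shape=[4, 4], dtype="float32")
    out = paddle.divide(x, y)

place = paddle.NPUPlace(0) if paddle.is_compiled_with_npu() else paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(startup_prog)

x_np = np.random.uniform(1, 2, [4, 4]).astype("float32")
y_np = np.random.uniform(1, 2, [4, 4]).astype("float32")
out_np, = exe.run(main_prog, feed={"x": x_np, "y": y_np}, fetch_list=[out])
print(np.allclose(out_np, x_np / y_np))  # expected: True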
@ -0,0 +1,140 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_ASCEND_CL
#include <memory>
#include <string>

#include "paddle/fluid/operators/elementwise/elementwise_div_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename DeviceContext, typename T>
class ElementwiseDivNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");

    auto* out = ctx.Output<Tensor>("Out");

    auto place = ctx.GetPlace();

    out->mutable_data<T>(place);

    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();

    // Out = X / Y via the NPU "Div" operator.
    auto runner = NpuOpRunner("Div", {*x, *y}, {*out}, {});
    runner.Run(stream);
  }
};

template <typename DeviceContext, typename T>
class ElementwiseDivGradNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Input<Tensor>("Out");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* x = ctx.Input<Tensor>("X");
    auto* y = ctx.Input<Tensor>("Y");

    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));

    auto place = ctx.GetPlace();

    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();

    // y_power = Y^(-1)
    Tensor y_power(y->type());
    y_power.mutable_data<T>(y->dims(), place);
    auto y_power_runner = NpuOpRunner("Power", {*y}, {y_power},
                                      {{"power", static_cast<float>(-1)}});
    y_power_runner.Run(stream);

    if (dx) {
      dx->mutable_data<T>(place);

      // Build a float mask that is 1 where X != 0 and 0 elsewhere:
      // ZerosLike -> Equal -> LogicalNot -> Cast.
      Tensor tensor_zeros(x->type());
      tensor_zeros.mutable_data<T>(x->dims(), place);
      auto tensor_zeros_runner =
          NpuOpRunner("ZerosLike", {*x}, {tensor_zeros}, {});
      tensor_zeros_runner.Run(stream);

      Tensor x_zero(paddle::framework::proto::VarType::BOOL);
      x_zero.mutable_data<bool>(x->dims(), place);
      auto x_zero_runner =
          NpuOpRunner("Equal", {*x, tensor_zeros}, {x_zero}, {});
      x_zero_runner.Run(stream);

      Tensor x_nozero(paddle::framework::proto::VarType::BOOL);
      x_nozero.mutable_data<bool>(x->dims(), place);
      auto x_nozero_runner =
          NpuOpRunner("LogicalNot", {x_zero}, {x_nozero}, {});
      x_nozero_runner.Run(stream);

      Tensor x_nozero_f(x->type());
      x_nozero_f.mutable_data<T>(x->dims(), place);
      auto x_nozero_f_runner =
          NpuOpRunner("Cast", {x_nozero}, {x_nozero_f},
                      {{"dst_type", static_cast<int32_t>(0)}});
      x_nozero_f_runner.Run(stream);

      // dX = mask * Y^(-1) * dOut
      Tensor x_grad_w(x->type());
      x_grad_w.mutable_data<T>(x->dims(), place);
      auto x_grad_w_runner =
          NpuOpRunner("Mul", {x_nozero_f, y_power}, {x_grad_w}, {});
      x_grad_w_runner.Run(stream);

      auto x_grad_runner = NpuOpRunner("Mul", {x_grad_w, *dout}, {*dx}, {});
      x_grad_runner.Run(stream);
    }

    if (dy) {
      dy->mutable_data<T>(place);

      // dY = Out * Y^(-1) * dOut
      Tensor y_grad_w(x->type());
      y_grad_w.mutable_data<T>(y->dims(), place);
      auto y_grad_w_runner =
          NpuOpRunner("Mul", {*out, y_power}, {y_grad_w}, {});
      y_grad_w_runner.Run(stream);

      auto y_grad_runner = NpuOpRunner("Mul", {y_grad_w, *dout}, {*dy}, {});
      y_grad_runner.Run(stream);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OP_NPU_KERNEL(
    elementwise_div,
    ops::ElementwiseDivNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::ElementwiseDivNPUKernel<paddle::platform::NPUDeviceContext,
                                 paddle::platform::float16>);

REGISTER_OP_NPU_KERNEL(
    elementwise_div_grad,
    ops::ElementwiseDivGradNPUKernel<paddle::platform::NPUDeviceContext,
                                     float>,
    ops::ElementwiseDivGradNPUKernel<paddle::platform::NPUDeviceContext,
                                     paddle::platform::float16>);

#endif
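For reference, composing the primitive NPU ops above, the buffers the grad kernel materializes are

    x_grad_w = [X != 0] * Y^(-1),   dX = x_grad_w * dOut
    y_grad_w = Out * Y^(-1),        dY = y_grad_w * dOut

i.e. both gradients are assembled from Power/ZerosLike/Equal/LogicalNot/Cast/Mul primitives rather than a single fused div-grad operator.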
@ -0,0 +1,171 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid

paddle.enable_static()
SEED = 2021


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestElementwiseDiv(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "elementwise_div"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        y = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.divide(x, y)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'Y': OpTest.np_dtype_to_fluid_dtype(y)
        }
        self.attrs = {}
        self.outputs = {'Out': out}

    def set_npu(self):
        self.__class__.use_npu = True

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)

    # TODO(ascendrc): Div grad test
    # def test_check_grad(self):
    #     if self.dtype == np.float16:
    #         return
    #     self.check_grad(['X'], 'Out')
    #
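If the TODO above is picked up later, a minimal sketch of what the grad check could look like. The method name, tolerance, and the use of OpTest.check_grad_with_place are assumptions, not part of this commit:

    # Hypothetical addition inside TestElementwiseDiv, once NPU grad output is verified.
    def test_check_grad_normal(self):
        if self.dtype == np.float16:
            return
        self.check_grad_with_place(
            self.place, ['X', 'Y'],
            'Out',
            max_relative_error=0.05,  # assumed tolerance for NPU numerics
            check_dygraph=False)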


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestElementwiseDivFp16(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "elementwise_div"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.uniform(1, 2, [3, 4]).astype(self.dtype)
        y = np.random.uniform(1, 2, [3, 4]).astype(self.dtype)
        out = np.divide(x, y)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'Y': OpTest.np_dtype_to_fluid_dtype(y)
        }
        self.attrs = {}
        self.outputs = {'Out': out}

    def set_npu(self):
        self.__class__.use_npu = True
        self.__class__.no_need_check_grad = True

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False, atol=1e-5)


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestElementwiseDivNet(unittest.TestCase):
    def _test(self, run_npu=True):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.uniform(1, 2, [32, 32]).astype('float32')
        b_np = np.random.uniform(1, 2, [32, 32]).astype('float32')
        c_np = np.random.uniform(1, 2, [32, 32]).astype('float32')
        d_np = np.random.uniform(1, 2, [32, 32]).astype('float32')
        label_np = np.random.randint(2, size=(32, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
            b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
            c = paddle.static.data(name="c", shape=[32, 32], dtype='float32')
            d = paddle.static.data(name="d", shape=[32, 32], dtype='float32')
            label = paddle.static.data(
                name="label", shape=[32, 1], dtype='int64')

            e = paddle.multiply(a, b)
            f = paddle.multiply(c, d)
            f.stop_gradient = True
            g = paddle.divide(e, f)

            fc_1 = fluid.layers.fc(input=g, size=128)
            prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')

            cost = fluid.layers.cross_entropy(input=prediction, label=label)
            loss = fluid.layers.reduce_mean(cost)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)

        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        print("Start run on {}".format(place))
        for epoch in range(100):

            pred_res, loss_res = exe.run(main_prog,
                                         feed={
                                             "a": a_np,
                                             "b": b_np,
                                             "c": c_np,
                                             "d": d_np,
                                             "label": label_np
                                         },
                                         fetch_list=[prediction, loss])
            if epoch % 10 == 0:
                print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
                    epoch, pred_res[0], loss_res))

        return pred_res, loss_res

    def test_npu(self):
        cpu_pred, cpu_loss = self._test(False)
        npu_pred, npu_loss = self._test(True)

        self.assertTrue(np.allclose(npu_pred, cpu_pred))
        self.assertTrue(np.allclose(npu_loss, cpu_loss))


if __name__ == '__main__':
    unittest.main()