parent 444c285202
commit 3dd992e24f
@@ -0,0 +1,82 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_ASCEND_CL
#include <iostream>
#include <memory>
#include <string>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/expand_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace paddle {
namespace operators {

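// NPU implementation of the 'expand' op: tiles the input tensor along each
// axis by the repetition factors given by 'expand_times'.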
template <typename DeviceContext, typename T>
class ExpandNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto rank = context.Input<Tensor>("X")->dims().size();
    PADDLE_ENFORCE_GE(
        rank, 1,
        platform::errors::InvalidArgument(
            "The number of dimensions of the input 'x' for Op(expand) "
            "must be greater than or equal to 1, but the value received is %d.",
            rank));
    PADDLE_ENFORCE_LE(
        rank, MAX_RANK_SUPPORTED,
        platform::errors::InvalidArgument(
            "The number of dimensions of the input 'x' for Op(expand) "
            "must be less than or equal to %d, but the value received is %d.",
            MAX_RANK_SUPPORTED, rank));
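    // REP_EXPAND_TEMPLATE (a macro from expand_op.h) expands to one 'case'
    // label per supported rank, each dispatching to Expand<Rank>() below.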
    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
  }

 protected:
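  // Expand<Rank> computes the tiled output shape (in_dims[i] *
  // expand_times[i]) and launches the device kernel for a fixed
  // compile-time rank.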
  template <int Rank>
  void Expand(const framework::ExecutionContext& context) const {
    auto* in0 = context.Input<framework::LoDTensor>("X");
    auto in_dims = in0->dims();
    auto expand_times = get_expand_times(context);
    PADDLE_ENFORCE_EQ(
        static_cast<size_t>(in_dims.size()), expand_times.size(),
        platform::errors::InvalidArgument(
            "The number of elements (%d) of 'expand_times' for "
            "Op(expand) must be equal to the number "
            "of dimensions (%d) of the input.",
            expand_times.size(), static_cast<size_t>(in_dims.size())));
    auto* out0 = context.Output<framework::LoDTensor>("Out");
    framework::DDim out_dims(in_dims);
    for (size_t i = 0; i < expand_times.size(); ++i) {
      out_dims[i] *= expand_times[i];
    }
    out0->Resize(out_dims);
    out0->mutable_data<T>(context.device_context().GetPlace());
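    // Tiling is delegated to the Ascend "TileD" operator, which receives the
    // per-axis repetition counts through its "multiples" attribute.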
    auto runner =
        NpuOpRunner("TileD", {*in0}, {*out0}, {{"multiples", expand_times}});
    auto stream =
        context.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();
    runner.Run(stream);
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_NPU_KERNEL(
    expand, ops::ExpandNPUKernel<paddle::platform::NPUDeviceContext, float>,
    ops::ExpandNPUKernel<paddle::platform::NPUDeviceContext,
                         paddle::platform::float16>);

#endif
@@ -0,0 +1,74 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifndef _WIN32
#include <unistd.h>
#endif

#include <iostream>
#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/string/printf.h"

namespace f = paddle::framework;
namespace p = paddle::platform;
namespace m = paddle::operators::math;

USE_OP(expand);
USE_OP_DEVICE_KERNEL(expand, NPU);

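// Compare builds a 3x1x7 input filled with ones, runs the 'expand' op with
// expand times {1, 10, 1} on the given device, and checks that the output
// is resized to 3x10x7.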
template <typename T>
void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
  // init
  auto in = scope->Var("X");
  auto expand_times = scope->Var("ExpandTimes");
  auto out = scope->Var("Out");
  auto in_t = in->GetMutable<f::LoDTensor>();
  auto out_t = out->GetMutable<f::LoDTensor>();
  auto expand_times_t = expand_times->GetMutable<f::LoDTensor>();

  auto place = ctx.GetPlace();
  TensorFromVector(std::vector<T>(3 * 1 * 7, 1), ctx, in_t);
  TensorFromVector(std::vector<int>({1, 10, 1}), ctx, expand_times_t);

  in_t->Resize(f::make_ddim({3, 1, 7}));
  expand_times_t->Resize(f::make_ddim({3}));
  out_t->Resize(f::make_ddim({3, 10, 7}));
  out_t->mutable_data<T>(place);

  f::AttributeMap attrs = {{}};
  auto op = f::OpRegistry::CreateOp(
      "expand", {{"X", {"X"}}, {"ExpandTimes", {"ExpandTimes"}}},
      {{"Out", {"Out"}}}, attrs);
  op->Run(*scope, place);
  ctx.Wait();

  auto out_dim = out_t->dims();
  EXPECT_EQ(out_dim.at(0), 3);
  EXPECT_EQ(out_dim.at(1), 10);
  EXPECT_EQ(out_dim.at(2), 7);
}

TEST(expand, NPU_fp32) {
  f::Scope scope;
  p::NPUDeviceContext ctx(p::NPUPlace(0));
  Compare<float>(&scope, ctx);
}
@@ -0,0 +1,141 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid

paddle.enable_static()
SEED = 2021


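# TestExpand feeds the repetition factors through the 'expand_times'
# attribute and compares the NPU kernel's output against np.tile.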
@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestExpand(OpTest):
    def setUp(self):
        self.set_npu()
        self.op_type = "expand"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.randn(3, 1, 7).astype(self.dtype)
        out = np.tile(x, [1, 10, 1])

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'expand_times': [1, 10, 1]}
        self.outputs = {'Out': out}

    def set_npu(self):
        self.__class__.use_npu = True

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)

    # TODO(ascendrc): Add grad test
    # def test_check_grad(self):
    #     if self.dtype == np.float16:
    #         return
    #     self.check_grad(['X'], 'Out')
    #


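# TestExpandV2 covers the alternative interface where the repetition
# factors arrive as an 'ExpandTimes' input tensor instead of an attribute.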
@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestExpandV2(TestExpand):
    def setUp(self):
        self.set_npu()
        self.op_type = "expand"
        self.place = paddle.NPUPlace(0)

        self.init_dtype()
        np.random.seed(SEED)
        x = np.random.randn(3, 1, 7).astype(self.dtype)
        out = np.tile(x, [1, 10, 1])
        expand_times = np.array([1, 10, 1]).astype(np.int32)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'ExpandTimes': OpTest.np_dtype_to_fluid_dtype(expand_times)
        }
        self.attrs = {}
        self.outputs = {'Out': out}


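# TestExpandFp16 reruns the attribute-based case in float16; the
# no_need_check_grad flag skips gradient checking for this dtype.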
@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestExpandFp16(TestExpand):
    no_need_check_grad = True

    def init_dtype(self):
        self.dtype = np.float16


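# TestExpandNet trains a minimal static-graph network around
# fluid.layers.expand and asserts that the NPU loss matches the CPU loss.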
@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestExpandNet(unittest.TestCase):
    def _test(self, run_npu=True):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.random(size=(32, 1)).astype('float32')
        label_np = np.random.randint(2, size=(32, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[32, 1], dtype='float32')
            label = paddle.static.data(
                name="label", shape=[32, 1], dtype='int64')

            res = paddle.fluid.layers.expand(a, [1, 32])
            loss = res.sum()
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)

        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        for epoch in range(100):
            loss_res = exe.run(main_prog,
                               feed={"a": a_np,
                                     "label": label_np},
                               fetch_list=[loss])
            if epoch % 10 == 0:
                # print the fetched loss value, not the Variable handle
                print("Epoch {} | Loss: {}".format(epoch, loss_res))

        return loss_res

    def test_npu(self):
        cpu_loss = self._test(False)
        npu_loss = self._test(True)

        self.assertTrue(np.allclose(npu_loss, cpu_loss))


if __name__ == '__main__':
    unittest.main()