From d0b2f17445f21454ad4f78df3c86d2621572d904 Mon Sep 17 00:00:00 2001
From: TFbunny
Date: Tue, 12 Jan 2021 16:19:01 -0500
Subject: [PATCH] add dynamic shape to GPU ReduceMean with testcases

---
 mindspore/core/abstract/infer_functions.h |  4 +-
 mindspore/core/abstract/prim_maths.cc     |  5 +-
 .../core/abstract/primitive_infer_map.cc  |  3 +-
 tests/st/ops/gpu/test_reduce_mean_op.py   | 48 +++++++++++++++++++
 4 files changed, 55 insertions(+), 5 deletions(-)

diff --git a/mindspore/core/abstract/infer_functions.h b/mindspore/core/abstract/infer_functions.h
index c183b78a38..2805bad823 100644
--- a/mindspore/core/abstract/infer_functions.h
+++ b/mindspore/core/abstract/infer_functions.h
@@ -263,8 +263,8 @@ AbstractBasePtr InferImplSub(const AnalysisEnginePtr &, const PrimitivePtr &prim
                              const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                const AbstractBasePtrList &args_spec_list);
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list);
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                               const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplMinimum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                  const AbstractBasePtrList &args_spec_list);
diff --git a/mindspore/core/abstract/prim_maths.cc b/mindspore/core/abstract/prim_maths.cc
index 1ee95a2ea3..4931a61e62 100644
--- a/mindspore/core/abstract/prim_maths.cc
+++ b/mindspore/core/abstract/prim_maths.cc
@@ -121,8 +121,9 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
   return ret;
 }
 
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list) {
+// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum.
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list) {
   const std::string op_name = primitive->name();
   CheckArgsSize(op_name, args_spec_list, 1);
   auto input_x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
diff --git a/mindspore/core/abstract/primitive_infer_map.cc b/mindspore/core/abstract/primitive_infer_map.cc
index b739f18eca..8028383735 100644
--- a/mindspore/core/abstract/primitive_infer_map.cc
+++ b/mindspore/core/abstract/primitive_infer_map.cc
@@ -44,7 +44,8 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimSqrtGrad, {InferImplSqrtGrad, true}},
     {prim::kPrimSub, {InferImplSub, true}},
     {prim::kPrimEqual, {InferImplEqual, true}},
-    {prim::kPrimReduceSum, {InferImplReduceSum, true}},
+    {prim::kPrimReduceSum, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMean, {InferImplReduceFunc, true}},
     {prim::kPrimMinimum, {InferImplMinimum, true}},
     {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
     {prim::kPrimLinSpace, {InferImplLinSpace, true}},
diff --git a/tests/st/ops/gpu/test_reduce_mean_op.py b/tests/st/ops/gpu/test_reduce_mean_op.py
index 7033449b5a..850148a950 100644
--- a/tests/st/ops/gpu/test_reduce_mean_op.py
+++ b/tests/st/ops/gpu/test_reduce_mean_op.py
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
 
 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -265,3 +266,50 @@ def test_ReduceMean():
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape
+
+class ReduceMean_Dynamic(nn.Cell):
+    def __init__(self, keepdims=False):
+        super(ReduceMean_Dynamic, self).__init__()
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+        self.reducemean = P.ReduceMean(keep_dims=keepdims)
+
+    def construct(self, input_x, axis):
+        input_x = self.test_dynamic(input_x)
+        output = self.reducemean(input_x, axis)
+        return output
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_reducemean_keepdims_true():
+    net = ReduceMean_Dynamic(keepdims=True)
+    x_tensor_1 = Tensor(x14)
+    output_1 = net(x_tensor_1, axis14)
+    x_tensor_2 = Tensor(x0)
+    output_2 = net(x_tensor_2, axis0)
+
+    expect_1 = np.mean(x14, axis=np_axis14, keepdims=True)
+    diff_1 = abs(output_1.asnumpy() - expect_1)
+    error_1 = np.ones(shape=expect_1.shape) * 1.0e-5
+    assert np.all(diff_1 < error_1)
+    assert output_1.shape == expect_1.shape
+
+    expect_2 = np.mean(x0, axis=axis0, keepdims=True)
+    diff_2 = abs(output_2.asnumpy() - expect_2)
+    error_2 = np.ones(shape=expect_2.shape) * 1.0e-5
+    assert np.all(diff_2 < error_2)
+    assert output_2.shape == expect_2.shape
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_reducemean_keepdims_false():
+    net = ReduceMean_Dynamic(keepdims=False)
+    x_tensor = Tensor(x12)
+    output = net(x_tensor, axis12)
+
+    expect = np.mean(x12, axis=axis12, keepdims=False)
+    diff = abs(output.asnumpy() - expect)
+    error = np.ones(shape=expect.shape) * 1.0e-5
+    assert np.all(diff < error)
+    assert output.shape == expect.shape
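
Usage note (not part of the patch): a minimal standalone sketch, assuming a GPU
MindSpore build, of how the dynamic-shape ReduceMean path enabled by this commit
can be exercised. GpuConvertToDynamicShape, P.ReduceMean, and the overall pattern
mirror the testcases above; the DynamicReduceMean class name, the input shape,
and the axis value are illustrative assumptions, not values from the original tests.

    # Hypothetical usage sketch; everything except the MindSpore APIs already
    # used in the testcases above is an illustrative assumption.
    import numpy as np
    import mindspore.context as context
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.ops.operations import _inner_ops as inner

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

    class DynamicReduceMean(nn.Cell):
        def __init__(self, keep_dims=False):
            super(DynamicReduceMean, self).__init__()
            # Converts the input to a dynamically shaped tensor so that the
            # dynamic-shape kernel and the InferImplReduceFunc shape inference
            # registered by this commit are exercised.
            self.to_dynamic = inner.GpuConvertToDynamicShape()
            self.reduce_mean = P.ReduceMean(keep_dims=keep_dims)

        def construct(self, x, axis):
            x = self.to_dynamic(x)
            return self.reduce_mean(x, axis)

    x = np.random.rand(2, 3, 4, 4).astype(np.float32)
    out = DynamicReduceMean(keep_dims=True)(Tensor(x), 3)
    # Result should match NumPy's mean along the same axis.
    assert np.allclose(out.asnumpy(), np.mean(x, axis=3, keepdims=True), atol=1e-5)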