!11213 Add dynamic shape support to GPU ReduceMean

From: @TFbunny
Reviewed-by: @tom__chen,@robingrosman
Signed-off-by: @robingrosman
pull/11213/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 6d0c0157aa

@ -265,7 +265,7 @@ AbstractBasePtr InferImplSub(const AnalysisEnginePtr &, const PrimitivePtr &prim
const AbstractBasePtrList &args_spec_list); const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &primitive, AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list); const AbstractBasePtrList &args_spec_list);
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list);
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list);
AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &primitive, AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list); const AbstractBasePtrList &args_spec_list);

@ -121,7 +121,8 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
return ret; return ret;
} }
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum.
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list) { const AbstractBasePtrList &args_spec_list) {
const std::string op_name = primitive->name(); const std::string op_name = primitive->name();
CheckArgsSize(op_name, args_spec_list, 1); CheckArgsSize(op_name, args_spec_list, 1);

@ -44,7 +44,8 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
{prim::kPrimSqrtGrad, {InferImplSqrtGrad, true}}, {prim::kPrimSqrtGrad, {InferImplSqrtGrad, true}},
{prim::kPrimSub, {InferImplSub, true}}, {prim::kPrimSub, {InferImplSub, true}},
{prim::kPrimEqual, {InferImplEqual, true}}, {prim::kPrimEqual, {InferImplEqual, true}},
-    {prim::kPrimReduceSum, {InferImplReduceSum, true}},
+    {prim::kPrimReduceSum, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMean, {InferImplReduceFunc, true}},
{prim::kPrimMinimum, {InferImplMinimum, true}}, {prim::kPrimMinimum, {InferImplMinimum, true}},
{prim::kPrimDivNoNan, {InferImplDivNoNan, true}}, {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
{prim::kPrimLinSpace, {InferImplLinSpace, true}}, {prim::kPrimLinSpace, {InferImplLinSpace, true}},

@ -21,6 +21,7 @@ import mindspore.nn as nn
from mindspore import Tensor from mindspore import Tensor
from mindspore.common.api import ms_function from mindspore.common.api import ms_function
from mindspore.ops import operations as P from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
axis0 = 3
@ -265,3 +266,50 @@ def test_ReduceMean():
error14 = np.ones(shape=expect14.shape) * 1.0e-5 error14 = np.ones(shape=expect14.shape) * 1.0e-5
assert np.all(diff14 < error14) assert np.all(diff14 < error14)
assert output[14].shape == expect14.shape assert output[14].shape == expect14.shape
class ReduceMean_Dynamic(nn.Cell):
    """Cell that routes its input through GpuConvertToDynamicShape before ReduceMean.

    This forces the GPU ReduceMean kernel onto the dynamic-shape code path so the
    tests below exercise shape re-inference rather than static compilation.
    """

    def __init__(self, keepdims=False):
        super(ReduceMean_Dynamic, self).__init__()
        # Marks the incoming tensor's shape as dynamic (GPU-only debug/test op).
        self.to_dynamic = inner.GpuConvertToDynamicShape()
        self.reduce_mean = P.ReduceMean(keep_dims=keepdims)

    def construct(self, input_x, axis):
        dynamic_x = self.to_dynamic(input_x)
        return self.reduce_mean(dynamic_x, axis)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_reducemean_keepdims_true():
    """ReduceMean with keep_dims=True on a dynamic-shape input.

    Feeds the same net two inputs of different shapes (x14 then x0) so the
    second call must re-infer shapes dynamically, and checks both results
    against numpy.mean within a 1e-5 absolute tolerance.
    """
    net = ReduceMean_Dynamic(keepdims=True)
    first_out = net(Tensor(x14), axis14)
    second_out = net(Tensor(x0), axis0)

    # NOTE: the first case compares against np_axis14 (numpy-compatible axis),
    # the second against axis0 directly — presumably axis14 is not a plain int.
    for actual, data, np_axis in ((first_out, x14, np_axis14),
                                  (second_out, x0, axis0)):
        expected = np.mean(data, axis=np_axis, keepdims=True)
        tolerance = np.ones(shape=expected.shape) * 1.0e-5
        assert np.all(abs(actual.asnumpy() - expected) < tolerance)
        assert actual.shape == expected.shape
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_dynamic_reducemean_keepdims_false():
    """ReduceMean with keep_dims=False on a dynamic-shape input.

    Single pass: reduces x12 along axis12 and compares against numpy.mean
    within a 1e-5 absolute tolerance, also verifying the squeezed shape.
    """
    net = ReduceMean_Dynamic(keepdims=False)
    actual = net(Tensor(x12), axis12)
    expected = np.mean(x12, axis=axis12, keepdims=False)
    tolerance = np.ones(shape=expected.shape) * 1.0e-5
    assert np.all(abs(actual.asnumpy() - expected) < tolerance)
    assert actual.shape == expected.shape

Loading…
Cancel
Save