From 36ea519009a3c97238faa2e0e40379b8099a6c1a Mon Sep 17 00:00:00 2001
From: jonwe
Date: Fri, 27 Nov 2020 06:20:29 -0500
Subject: [PATCH] ExpandDims dynamic shape

---
 mindspore/core/abstract/prim_arrays.cc |  56 ++++++++++
 mindspore/core/abstract/prim_others.cc |  26 -----
 mindspore/ops/operations/array_ops.py  |  24 +----
 tests/st/ops/gpu/test_expand_dims.py   | 141 +++++++++++++++++++++++++
 4 files changed, 201 insertions(+), 46 deletions(-)
 create mode 100644 tests/st/ops/gpu/test_expand_dims.py

diff --git a/mindspore/core/abstract/prim_arrays.cc b/mindspore/core/abstract/prim_arrays.cc
index fa90104f29..f3b12bdf4c 100644
--- a/mindspore/core/abstract/prim_arrays.cc
+++ b/mindspore/core/abstract/prim_arrays.cc
@@ -647,5 +647,61 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr &
     std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape, min_shape, max_shape));
   return ret;
 }
+
+AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list) {
+  const std::string op_name = primitive->name();
+  CheckArgsSize(op_name, args_spec_list, 2);
+  auto x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
+  MS_EXCEPTION_IF_NULL(x);
+  MS_EXCEPTION_IF_NULL(x->shape());
+
+  ShapeVector x_shape = x->shape()->shape();
+  ShapeVector x_shape_min = x->shape()->min_shape();
+  if (x_shape_min.empty()) {
+    x_shape_min = x_shape;
+  }
+  ShapeVector x_shape_max = x->shape()->max_shape();
+  if (x_shape_max.empty()) {
+    x_shape_max = x_shape;
+  }
+
+  int64_t value = 0;
+  if (args_spec_list[1]->isa<AbstractTensor>()) {  // axis is Tensor
+    auto axis = CheckArg<AbstractTensor>(op_name, args_spec_list, 1);
+    auto axis_value = axis->BuildValue();
+    if (!axis_value->isa<tensor::Tensor>()) {
+      MS_LOG(EXCEPTION) << "axis value should be a tensor, but got " << axis_value->type_name();
+    }
+    auto axis_tensor = axis_value->cast<tensor::TensorPtr>();
+    value = *(static_cast<int64_t *>(axis_tensor->data_c()));
+  } else if (args_spec_list[1]->isa<AbstractScalar>()) {  // axis is Scalar
+    auto axis = CheckArg<AbstractScalar>(op_name, args_spec_list, 1);
+    MS_EXCEPTION_IF_NULL(axis);
+    value = GetValue<int64_t>(axis->BuildValue());
+  } else {
+    MS_LOG(EXCEPTION) << "axis has an incorrect type in ExpandDims";
+  }
+
+  if (value < -(SizeToInt(x_shape.size()) + 1) || value > SizeToInt(x_shape.size())) {
+    MS_LOG(EXCEPTION) << "axis value should be in range [-input_x.dim-1, input_x.dim], but axis value is " << value
+                      << " and input_x.dim is " << x_shape.size();
+  }
+  if (value < 0) {
+    value = value + SizeToInt(x_shape.size()) + 1;
+  }
+  ShapeVector shape;
+  shape.insert(shape.end(), x_shape.begin(), x_shape.end());
+  shape.insert(shape.begin() + value, 1);
+  ShapeVector shape_min;
+  shape_min.insert(shape_min.end(), x_shape_min.begin(), x_shape_min.end());
+  shape_min.insert(shape_min.begin() + value, 1);
+  ShapeVector shape_max;
+  shape_max.insert(shape_max.end(), x_shape_max.begin(), x_shape_max.end());
+  shape_max.insert(shape_max.begin() + value, 1);
+
+  auto ret = std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape, shape_min, shape_max));
+  return ret;
+}
 }  // namespace abstract
 }  // namespace mindspore
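The shape arithmetic above is compact enough to sanity-check in plain Python. The sketch below is illustrative only (expand_dims_shapes is a hypothetical name, not a MindSpore API): it mirrors the range check, the negative-axis normalization, the fallback of empty min/max shapes to the static shape, and the parallel insertion of the new unit dimension.

    # Hypothetical helper mirroring InferImplExpandDims' shape logic; not MindSpore API.
    def expand_dims_shapes(x_shape, x_shape_min, x_shape_max, axis):
        rank = len(x_shape)
        if axis < -rank - 1 or axis > rank:
            raise ValueError("axis %d out of range [%d, %d]" % (axis, -rank - 1, rank))
        if axis < 0:
            axis += rank + 1
        # Empty min/max shapes fall back to the static shape, as in the C++ above.
        x_shape_min = x_shape_min or x_shape
        x_shape_max = x_shape_max or x_shape
        insert_one = lambda s: list(s[:axis]) + [1] + list(s[axis:])
        return insert_one(x_shape), insert_one(x_shape_min), insert_one(x_shape_max)

    # axis == -1 appends a trailing unit dimension to all three shape vectors.
    assert expand_dims_shapes([2, 3], [1, 3], [4, 3], -1) == ([2, 3, 1], [1, 3, 1], [4, 3, 1])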
diff --git a/mindspore/core/abstract/prim_others.cc b/mindspore/core/abstract/prim_others.cc
index 46a30b3e6c..8726d36b88 100644
--- a/mindspore/core/abstract/prim_others.cc
+++ b/mindspore/core/abstract/prim_others.cc
@@ -492,32 +492,6 @@ AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &pri
   return ret;
 }
 
-AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                    const AbstractBasePtrList &args_spec_list) {
-  const std::string op_name = primitive->name();
-  CheckArgsSize(op_name, args_spec_list, 1);
-  auto x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
-  MS_EXCEPTION_IF_NULL(x);
-  MS_EXCEPTION_IF_NULL(x->shape());
-
-  std::vector<int64_t> shape;
-  std::vector<int64_t> x_shape = x->shape()->shape();
-  shape.insert(shape.end(), x_shape.begin(), x_shape.end());
-  auto axis = primitive->GetAttr("axis");
-  auto value = GetValue<int64_t>(axis);
-  if (value < -(SizeToInt(x_shape.size()) + 1) || value > SizeToInt(x_shape.size())) {
-    MS_LOG(EXCEPTION) << " axis value shoud be in range [-intput_x.dim-1,input_x.dim], but axis value is" << value
-                      << " and input_x.dim is" << x_shape.size();
-  }
-  if (value < 0) {
-    value = value + SizeToInt(x_shape.size()) + 1;
-  }
-  shape.insert(shape.begin() + value, 1);
-
-  auto ret = std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape));
-  return ret;
-}
-
 AbstractBasePtr InferImplGpuConvertToDynamicShape(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                                   const AbstractBasePtrList &args_spec_list) {
   const std::string &op_name = primitive->name();
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index a6b88ef85a..53181da8ab 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -122,7 +122,7 @@ def _check_infer_attr_reduce(axis, keep_dims, prim_name):
         validator.check_value_type('axis[%d]' % index, value, [int], prim_name)
 
 
-class ExpandDims(PrimitiveWithInfer):
+class ExpandDims(PrimitiveWithCheck):
     """
     Adds an additional dimension at the given axis.
 
@@ -160,29 +160,13 @@ class ExpandDims(PrimitiveWithInfer):
         """Initialize ExpandDims"""
         self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])
 
-    def __infer__(self, x, axis):
-        validator.check_subclass("input_x", x['dtype'], mstype.tensor, self.name)
+    def __check__(self, x, axis):
+        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
+        validator.check_subclass("axis", axis['dtype'], mstype.int_, self.name)
         x_shape = list(x['shape'])
         axis_v = axis['value']
         rank = len(x_shape)
         validator.check_int_range(axis_v, -rank - 1, rank, Rel.INC_BOTH, 'axis', self.name)
-        value = None
-        if x['value'] is not None:
-            value = x['value'].asnumpy()
-            value = np.expand_dims(value, axis_v)
-            value = Tensor(value)
-        if axis_v < 0:
-            axis_v = rank + 1 + axis_v
-        x_shape.insert(axis_v, 1)
-        out = {'shape': x_shape,
-               'dtype': x['dtype'],
-               'value': value}
-        if 'min_shape' in x and 'max_shape' in x:
-            out['min_shape'] = x['min_shape']
-            out['min_shape'].insert(axis_v, 1)
-            out['max_shape'] = x['max_shape']
-            out['max_shape'].insert(axis_v, 1)
-        return out
 
 
 class DType(PrimitiveWithInfer):
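With ExpandDims reworked as a PrimitiveWithCheck, __check__ only validates the inputs; shape (and min/max shape) propagation now happens in the C++ InferImplExpandDims added above. Call sites are unchanged; a minimal usage sketch, assuming a working MindSpore install:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    expand_dims = P.ExpandDims()
    x = Tensor(np.ones((2, 3), np.float32))
    y = expand_dims(x, 0)  # axis passed as a scalar input; y has shape (1, 2, 3)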
diff --git a/tests/st/ops/gpu/test_expand_dims.py b/tests/st/ops/gpu/test_expand_dims.py
new file mode 100644
index 0000000000..1b054d6c33
--- /dev/null
+++ b/tests/st/ops/gpu/test_expand_dims.py
@@ -0,0 +1,141 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import pytest
+
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.expand_dims = P.ExpandDims()
+
+    def construct(self, tensor):
+        return self.expand_dims(tensor, -1)
+
+
+class NetDynamic(nn.Cell):
+    def __init__(self):
+        super(NetDynamic, self).__init__()
+        self.conv = inner.GpuConvertToDynamicShape()
+        self.expand_dims = P.ExpandDims()
+
+    def construct(self, x):
+        x_conv = self.conv(x)
+        return self.expand_dims(x_conv, -1)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_bool():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool)
+    net = NetDynamic()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int8():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = NetDynamic()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_uint8():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int16():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int32():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int64():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float16():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float32():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float64():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
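The bool and int8 cases route through NetDynamic, whose GpuConvertToDynamicShape step erases the static shape and so exercises the new dynamic-shape infer; the remaining dtypes cover the static path in both graph and PyNative modes. Assuming a GPU build of MindSpore, the suite runs directly with pytest -s tests/st/ops/gpu/test_expand_dims.py.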