ExpandDims dynamic shape

pull/9040/head
jonwe 4 years ago
parent 3874160faf
commit 36ea519009

@@ -647,5 +647,61 @@ AbstractBasePtr InferImplReshape(const AnalysisEnginePtr &, const PrimitivePtr &
     std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape, min_shape, max_shape));
   return ret;
 }
+
+AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list) {
+  const std::string op_name = primitive->name();
+  CheckArgsSize(op_name, args_spec_list, 2);
+  auto x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
+  MS_EXCEPTION_IF_NULL(x);
+  MS_EXCEPTION_IF_NULL(x->shape());
+  ShapeVector x_shape = x->shape()->shape();
+  // Empty min/max bounds mean the input shape is static; fall back to the static shape.
+  ShapeVector x_shape_min = x->shape()->min_shape();
+  if (x_shape_min.empty()) {
+    x_shape_min = x_shape;
+  }
+  ShapeVector x_shape_max = x->shape()->max_shape();
+  if (x_shape_max.empty()) {
+    x_shape_max = x_shape;
+  }
+  int64_t value = 0;
+  if (args_spec_list[1]->isa<AbstractTensor>()) {  // axis is a Tensor
+    auto axis = CheckArg<AbstractTensor>(op_name, args_spec_list, 1);
+    auto axis_value = axis->BuildValue();
+    if (!axis_value->isa<tensor::Tensor>()) {
+      MS_LOG(EXCEPTION) << "axis_value should be a tensor, but got " << axis_value->type_name();
+    }
+    auto axis_tensor = axis_value->cast<tensor::TensorPtr>();
+    value = *(static_cast<int64_t *>(axis_tensor->data_c()));
+  } else if (args_spec_list[1]->isa<AbstractScalar>()) {  // axis is a Scalar
+    auto axis = CheckArg<AbstractScalar>(op_name, args_spec_list, 1);
+    MS_EXCEPTION_IF_NULL(axis);
+    value = GetValue<int64_t>(axis->BuildValue());
+  } else {
+    MS_LOG(EXCEPTION) << "The axis input of ExpandDims has an incorrect type.";
+  }
+  if (value < -(SizeToInt(x_shape.size()) + 1) || value > SizeToInt(x_shape.size())) {
+    MS_LOG(EXCEPTION) << "axis value should be in range [-input_x.dim - 1, input_x.dim], but axis value is " << value
+                      << " and input_x.dim is " << x_shape.size();
+  }
+  // Normalize a negative axis to its non-negative equivalent.
+  if (value < 0) {
+    value = value + SizeToInt(x_shape.size()) + 1;
+  }
+  // Insert a dimension of size 1 at the same position in the shape and in its min/max bounds.
+  ShapeVector shape;
+  shape.insert(shape.end(), x_shape.begin(), x_shape.end());
+  shape.insert(shape.begin() + value, 1);
+  ShapeVector shape_min;
+  shape_min.insert(shape_min.end(), x_shape_min.begin(), x_shape_min.end());
+  shape_min.insert(shape_min.begin() + value, 1);
+  ShapeVector shape_max;
+  shape_max.insert(shape_max.end(), x_shape_max.begin(), x_shape_max.end());
+  shape_max.insert(shape_max.begin() + value, 1);
+  auto ret = std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape, shape_min, shape_max));
+  return ret;
+}
 }  // namespace abstract
 }  // namespace mindspore
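
In short, the new routine normalizes `axis` into `[0, rank]` and inserts a size-1 dimension at that position in the shape and in its min/max bounds, so dynamic-shape information is preserved through the op. A minimal Python sketch of the same bookkeeping (hypothetical helper name, illustration only, not part of the patch):

def expand_dims_shape(shape, axis, min_shape=None, max_shape=None):
    """Mirror of InferImplExpandDims' shape bookkeeping (illustration only)."""
    rank = len(shape)
    if not -rank - 1 <= axis <= rank:
        raise ValueError("axis %d out of range [%d, %d]" % (axis, -rank - 1, rank))
    if axis < 0:
        axis += rank + 1
    # Empty min/max bounds fall back to the static shape, as in the C++ code.
    min_shape = list(min_shape) if min_shape else list(shape)
    max_shape = list(max_shape) if max_shape else list(shape)
    out = list(shape)
    for s in (out, min_shape, max_shape):
        s.insert(axis, 1)
    return out, min_shape, max_shape

# e.g. expand_dims_shape([2, 3], -1) == ([2, 3, 1], [2, 3, 1], [2, 3, 1])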

@@ -492,32 +492,6 @@ AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &pri
   return ret;
 }
 
-AbstractBasePtr InferImplExpandDims(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                    const AbstractBasePtrList &args_spec_list) {
-  const std::string op_name = primitive->name();
-  CheckArgsSize(op_name, args_spec_list, 1);
-  auto x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
-  MS_EXCEPTION_IF_NULL(x);
-  MS_EXCEPTION_IF_NULL(x->shape());
-  std::vector<int64_t> shape;
-  std::vector<int64_t> x_shape = x->shape()->shape();
-  shape.insert(shape.end(), x_shape.begin(), x_shape.end());
-  auto axis = primitive->GetAttr("axis");
-  auto value = GetValue<int64_t>(axis);
-  if (value < -(SizeToInt(x_shape.size()) + 1) || value > SizeToInt(x_shape.size())) {
-    MS_LOG(EXCEPTION) << " axis value shoud be in range [-intput_x.dim-1,input_x.dim], but axis value is" << value
-                      << " and input_x.dim is" << x_shape.size();
-  }
-  if (value < 0) {
-    value = value + SizeToInt(x_shape.size()) + 1;
-  }
-  shape.insert(shape.begin() + value, 1);
-  auto ret = std::make_shared<AbstractTensor>(x->element(), std::make_shared<Shape>(shape));
-  return ret;
-}
-
 AbstractBasePtr InferImplGpuConvertToDynamicShape(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                                   const AbstractBasePtrList &args_spec_list) {
   const std::string &op_name = primitive->name();
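
The removed implementation read `axis` from a primitive attribute via `GetAttr("axis")`, so the axis had to be fixed at graph-build time, and it always built a static `Shape` with no min/max bounds. Both versions share the same validity rule: for an input of rank n, `axis` must lie in [-n - 1, n], because the output has n + 1 dimensions. A quick illustration of that rule (hypothetical helper):

def axis_is_valid(axis, rank):
    # One extra slot at each end: the output has rank + 1 dimensions.
    return -rank - 1 <= axis <= rank

assert axis_is_valid(-4, 3) and axis_is_valid(3, 3)
assert not axis_is_valid(4, 3)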

@@ -122,7 +122,7 @@ def _check_infer_attr_reduce(axis, keep_dims, prim_name):
             validator.check_value_type('axis[%d]' % index, value, [int], prim_name)
 
 
-class ExpandDims(PrimitiveWithInfer):
+class ExpandDims(PrimitiveWithCheck):
     """
     Adds an additional dimension at the given axis.
@@ -160,29 +160,13 @@ class ExpandDims(PrimitiveWithInfer):
         """Initialize ExpandDims"""
         self.init_prim_io_names(inputs=['x', 'axis'], outputs=['output'])
 
-    def __infer__(self, x, axis):
-        validator.check_subclass("input_x", x['dtype'], mstype.tensor, self.name)
+    def __check__(self, x, axis):
+        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
+        validator.check_subclass("axis", axis['dtype'], mstype.int_, self.name)
-        x_shape = list(x['shape'])
-        axis_v = axis['value']
-        rank = len(x_shape)
-        validator.check_int_range(axis_v, -rank - 1, rank, Rel.INC_BOTH, 'axis', self.name)
-        value = None
-        if x['value'] is not None:
-            value = x['value'].asnumpy()
-            value = np.expand_dims(value, axis_v)
-            value = Tensor(value)
-        if axis_v < 0:
-            axis_v = rank + 1 + axis_v
-        x_shape.insert(axis_v, 1)
-        out = {'shape': x_shape,
-               'dtype': x['dtype'],
-               'value': value}
-        if 'min_shape' in x and 'max_shape' in x:
-            out['min_shape'] = x['min_shape']
-            out['min_shape'].insert(axis_v, 1)
-            out['max_shape'] = x['max_shape']
-            out['max_shape'].insert(axis_v, 1)
-        return out
 
 
 class DType(PrimitiveWithInfer):
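
Moving ExpandDims from PrimitiveWithInfer to PrimitiveWithCheck means the Python side now only validates dtypes; shape inference is delegated to the backend (the C++ InferImplExpandDims above), which can propagate min/max shapes. Call-site usage is unchanged; a minimal sketch, assuming a configured MindSpore context:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

expand_dims = P.ExpandDims()
x = Tensor(np.ones((2, 3), dtype=np.float32))
y = expand_dims(x, 0)  # output shape: (1, 2, 3)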

@@ -0,0 +1,141 @@
+# Copyright 2019 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import pytest
+
+import mindspore.context as context
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+
+
+class Net(nn.Cell):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.expand_dims = P.ExpandDims()
+
+    def construct(self, tensor):
+        return self.expand_dims(tensor, -1)
+
+
+class NetDynamic(nn.Cell):
+    def __init__(self):
+        super(NetDynamic, self).__init__()
+        self.conv = inner.GpuConvertToDynamicShape()
+        self.expand_dims = P.ExpandDims()
+
+    def construct(self, x):
+        # Convert to a dynamic-shape tensor first so ExpandDims takes the new infer path.
+        x_conv = self.conv(x)
+        return self.expand_dims(x_conv, -1)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_bool():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.bool_)
+    net = NetDynamic()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int8():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int8)
+    net = NetDynamic()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_uint8():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.uint8)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int16():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int16)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int32():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int32)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_int64():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.int64)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float16():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float16)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float32():
+    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float32)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_net_float64():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    x = np.random.randn(1, 16, 1, 1).astype(np.float64)
+    net = Net()
+    output = net(Tensor(x))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
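
Each test pins one dtype, alternating graph and PyNative mode; only the bool and int8 cases route through NetDynamic and therefore exercise the new dynamic-shape infer path. Every assertion checks the plain NumPy equivalence below (illustration only):

import numpy as np

x = np.random.randn(1, 16, 1, 1).astype(np.float32)
assert np.expand_dims(x, -1).shape == (1, 16, 1, 1, 1)  # ExpandDims(x, -1) appends a trailing 1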