change tensor dtype and shape from function to attr

pull/1919/head
buxue 5 years ago
parent 553432c968
commit 66bbdb4a31
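This commit turns Tensor.dtype and Tensor.shape (and their MetaTensor counterparts) from methods into read-only properties and updates all call sites accordingly. A minimal before/after sketch of the new usage, assuming a NumPy-backed Tensor named `t`:

>>> import numpy as np
>>> import mindspore
>>> t = mindspore.Tensor(np.ones((2, 3), np.int32))
>>> t.shape        # previously t.shape()
(2, 3)
>>> t.dtype        # previously t.dtype()
Int32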

@@ -79,12 +79,12 @@ if __name__ == '__main__':
for _, cell in net.cells_and_names():
if isinstance(cell, nn.Conv2d):
cell.weight.default_input = weight_init.initializer(weight_init.XavierUniform(),
cell.weight.default_input.shape(),
cell.weight.default_input.dtype()).to_tensor()
cell.weight.default_input.shape,
cell.weight.default_input.dtype).to_tensor()
if isinstance(cell, nn.Dense):
cell.weight.default_input = weight_init.initializer(weight_init.TruncatedNormal(),
cell.weight.default_input.shape(),
cell.weight.default_input.dtype()).to_tensor()
cell.weight.default_input.shape,
cell.weight.default_input.dtype).to_tensor()
if not config.use_label_smooth:
config.label_smooth_factor = 0.0

@@ -338,15 +338,15 @@ class Dense_Thor(Cell):
self.has_bias = check_bool(has_bias)
self.thor = True
if isinstance(weight_init, Tensor):
if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
weight_init.shape()[1] != in_channels:
if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels:
raise ValueError("weight_init shape error")
self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
if self.has_bias:
if isinstance(bias_init, Tensor):
if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
raise ValueError("bias_init shape error")
self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

@@ -56,7 +56,7 @@ def init_net_param(network, init_value='ones'):
params = network.trainable_params()
for p in params:
if isinstance(p.data, Tensor) and 'beta' not in p.name and 'gamma' not in p.name and 'bias' not in p.name:
p.set_parameter_data(initializer(init_value, p.data.shape(), p.data.dtype()))
p.set_parameter_data(initializer(init_value, p.data.shape, p.data.dtype))
def main():

@@ -384,6 +384,28 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
.def(py::init<py::tuple, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
.def(py::init<Tensor, TypePtr>(), py::arg("input"), py::arg("dtype") = nullptr)
.def_readonly(PYTHON_TENSOR_FLAG, &Tensor::parse_info_)
.def_property_readonly("dtype", &Tensor::Dtype, R"mydelimiter(
Get the tensor's data type.
Returns:
type, the data type of tensor.
Examples:
>>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
>>> data.dtype
Int32
)mydelimiter")
.def_property_readonly("shape", &Tensor::GetPyTupleShape, R"mydelimiter(
Get the tensor's shape.
Returns:
tuple[int], the shape of tensor.
Examples:
>>> data = mindspore.Tensor(np.ones((3, 3)))
>>> data.shape
(3, 3)
)mydelimiter")
.def("asnumpy", &Tensor::data_sync, R"mydelimiter(
Convert tensor to numpy.ndarray.
@@ -437,17 +459,6 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
>>> data.dim()
2
)mydelimiter")
.def("dtype", &Tensor::Dtype, R"mydelimiter(
Get the tensor's data type.
Returns:
type, the data type of tensor.
Examples:
>>> data = mindspore.Tensor(np.ones((2, 1), np.int32))
>>> data.dtype()
Int32
)mydelimiter")
.def("set_dtype", &Tensor::SetDtype, R"mydelimiter(
Set the tensor's data type.
@@ -459,17 +470,6 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
>>> data.set_dtype(mindspore.int32)
mindspore.int32
)mydelimiter")
.def("shape", &Tensor::GetPyTupleShape, R"mydelimiter(
Get the tensor's shape.
Returns:
tuple[int], the shape of tensor.
Examples:
>>> data = mindspore.Tensor(np.ones((3, 3)))
>>> data.shape()
(3, 3)
)mydelimiter")
.def("__str__", &Tensor::ToString)
.def("__repr__", &Tensor::ToStringRepr)
.def(py::pickle(
@@ -488,8 +488,8 @@ REGISTER_PYBIND_DEFINE(Tensor, ([](const py::module *m) {
(void)py::class_<MetaTensor, std::shared_ptr<MetaTensor>>(*m, "MetaTensor")
.def(py::init<TypePtr, const std::vector<int>>(), py::arg("dtype"), py::arg("shape"))
.def_readonly(PYTHON_META_TENSOR_FLAG, &MetaTensor::parse_info_)
.def("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.")
.def("shape", &MetaTensor::shape, "Get the MetaTensor's shape.");
.def_property_readonly("dtype", &MetaTensor::Dtype, "Get the MetaTensor's dtype.")
.def_property_readonly("shape", &MetaTensor::shape, "Get the MetaTensor's shape.");
}));
} // namespace tensor
} // namespace mindspore

@@ -170,8 +170,8 @@ def get_py_obj_dtype(obj):
Type of MindSpore type.
"""
# Tensor
if hasattr(obj, 'dtype') and callable(obj.dtype) and isinstance(obj.dtype(), typing.Type):
return tensor_type(obj.dtype())
if hasattr(obj, 'dtype') and isinstance(obj.dtype, typing.Type):
return tensor_type(obj.dtype)
if hasattr(obj, '__primitive_flag__') or hasattr(obj, 'construct'):
return function
if isinstance(obj, (typing.Type, type)):

@@ -331,11 +331,11 @@ def initializer(init, shape=None, dtype=mstype.float32):
raise TypeError("Unsupported init type '{}'.".format(type(init)))
if isinstance(init, Tensor):
init_shape = init.shape()
init_shape = init.shape
shape = shape if isinstance(shape, (tuple, list)) else [shape]
if shape is not None and init_shape != tuple(shape):
raise ValueError("The shape of init should be same as variable shape, but got the shape of init {} and "
"the variable shape {}.".format(list(init.shape()), shape))
"the variable shape {}.".format(list(init.shape), shape))
return init
if isinstance(shape, list):

@@ -140,8 +140,8 @@ class Parameter:
x.name = prefix + '.' + x.name
x.is_init = False
if init != 'same':
shape = self.default_input.shape()
dtype = self.default_input.dtype()
shape = self.default_input.shape
dtype = self.default_input.dtype
if isinstance(init, (str, Initializer, numbers.Number)):
x.init_mode = initializer(init, shape=shape, dtype=dtype)
x.default_input = MetaTensor(dtype, shape)

@@ -45,13 +45,13 @@ class Tensor(Tensor_):
>>> # init a tensor with input data
>>> t1 = Tensor(np.zeros([1, 2, 3]), mindspore.float32)
>>> assert isinstance(t1, Tensor)
>>> assert t1.shape() == (1, 2, 3)
>>> assert t1.dtype() == mindspore.float32
>>> assert t1.shape == (1, 2, 3)
>>> assert t1.dtype == mindspore.float32
>>>
>>> # init a tensor with a float scalar
>>> t2 = Tensor(0.1)
>>> assert isinstance(t2, Tensor)
>>> assert t2.dtype() == mindspore.float64
>>> assert t2.dtype == mindspore.float64
"""
def __init__(self, input_data, dtype=None):
@@ -80,7 +80,7 @@ class Tensor(Tensor_):
return False
# The GE backend don't support single `Equal` operator execution.
# bool type is not supported for `Equal` operator in backend.
if context.get_context("enable_ge") or self.dtype() == mstype.bool_ or other.dtype() == mstype.bool_:
if context.get_context("enable_ge") or self.dtype == mstype.bool_ or other.dtype == mstype.bool_:
return Tensor(np.array(self.asnumpy() == other.asnumpy()))
return tensor_operator_registry.get('__eq__')(self, other)
@@ -166,7 +166,7 @@ class Tensor(Tensor_):
return out[0]
def __str__(self):
if self.dtype() == mstype.type_none:
if self.dtype == mstype.type_none:
return "Unknown Tensor type!"
return str(self.asnumpy())

@@ -267,21 +267,21 @@ class MobileNetV2(nn.Cell):
if isinstance(m, (nn.Conv2d, DepthwiseConv)):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
m.weight.data.shape()).astype("float32")))
m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
elif isinstance(m, nn.BatchNorm2d):
m.gamma.set_parameter_data(
Tensor(np.ones(m.gamma.data.shape(), dtype="float32")))
Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
m.beta.set_parameter_data(
Tensor(np.zeros(m.beta.data.shape(), dtype="float32")))
Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
elif isinstance(m, nn.Dense):
m.weight.set_parameter_data(Tensor(np.random.normal(
0, 0.01, m.weight.data.shape()).astype("float32")))
0, 0.01, m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
def mobilenet_v2(**kwargs):

@@ -322,21 +322,21 @@ class MobileNetV3(nn.Cell):
if isinstance(m, (nn.Conv2d)):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
m.weight.data.shape()).astype("float32")))
m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
elif isinstance(m, nn.BatchNorm2d):
m.gamma.set_parameter_data(
Tensor(np.ones(m.gamma.data.shape(), dtype="float32")))
Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
m.beta.set_parameter_data(
Tensor(np.zeros(m.beta.data.shape(), dtype="float32")))
Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
elif isinstance(m, nn.Dense):
m.weight.set_parameter_data(Tensor(np.random.normal(
0, 0.01, m.weight.data.shape()).astype("float32")))
0, 0.01, m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
def mobilenet_v3(model_name, **kwargs):

@@ -131,7 +131,7 @@ class Flatten(Cell):
Examples:
>>> net = nn.Flatten()
>>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32)
>>> input.shape()
>>> input.shape
(2, 2, 2)
>>> net(input)
[[1.2 1.2 2.1 2.1]
@@ -198,15 +198,15 @@ class Dense(Cell):
self.has_bias = check_bool(has_bias)
if isinstance(weight_init, Tensor):
if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
weight_init.shape()[1] != in_channels:
if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels:
raise ValueError("weight_init shape error")
self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
if self.has_bias:
if isinstance(bias_init, Tensor):
if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
raise ValueError("bias_init shape error")
self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")

@@ -69,7 +69,7 @@ class Conv2d(Cell):
Examples:
>>> net = combined.Conv2d(120, 240, 4, batchnorm=True, activation='ReLU')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape()
>>> net(input).shape
(1, 240, 1024, 640)
"""

@@ -168,7 +168,7 @@ class Conv2d(_Conv):
Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape()
>>> net(input).shape
(1, 240, 1024, 640)
"""
@cell_attr_register

@@ -56,7 +56,7 @@ class Embedding(Cell):
>>>
>>> # Maps the input word IDs to word embedding.
>>> output = net(input_data)
>>> output.shape()
>>> output.shape
(8, 128, 768)
"""
def __init__(self, vocab_size, embedding_size, use_one_hot=False, embedding_table='normal', dtype=mstype.float32):

@@ -474,7 +474,7 @@ class LayerNorm(Cell):
Examples:
>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
>>> shape1 = x.shape()[1:]
>>> shape1 = x.shape[1:]
>>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
>>> m(x)
"""

@@ -113,7 +113,7 @@ class MaxPool2d(_PoolNd):
[0. 0. 4. 0.]
[1. 8. 7. 0.]]]]
>>> output = pool(x)
>>> output.shape()
>>> output.shape
(1, 2, 2, 2)
>>> output
[[[[7. 8.]
@@ -195,7 +195,7 @@ class AvgPool2d(_PoolNd):
[0. 8. 9. 7.]
[2. 1. 4. 9.]]]]
>>> output = pool(x)
>>> output.shape()
>>> output.shape
(1, 2, 2, 2)
>>> output
[[[[4.888889 4.4444447]
@@ -260,7 +260,7 @@ class AvgPool1d(_PoolNd):
>>> pool = nn.AvgPool1d(kernel_size=6, strides=1)
>>> x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
>>> output = pool(x)
>>> output.shape()
>>> output.shape
(1, 3, 1)
"""

@@ -571,8 +571,8 @@ class DenseQuant(Cell):
self.has_bias = check_bool(has_bias)
if isinstance(weight_init, Tensor):
if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
weight_init.shape()[1] != in_channels:
if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels:
raise ValueError("weight_init shape error")
self.weight = Parameter(initializer(
@@ -580,7 +580,7 @@ class DenseQuant(Cell):
if self.has_bias:
if isinstance(bias_init, Tensor):
if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
raise ValueError("bias_init shape error")
self.bias = Parameter(initializer(

@@ -23,7 +23,7 @@ greater = base.MultitypeFuncGraph("greater")
@greater.register("Number", "Number")
def _greater_scala(x, y):
def _greater_scalar(x, y):
"""
Determine whether two numbers are greater.

@@ -145,10 +145,10 @@ class SameTypeShape(PrimitiveWithInfer):
def __call__(self, x, y):
"""run in PyNative mode"""
validator.check_value_type("x", x, Tensor, self.name)
validator.check_value_type("y", y, Tensor, self.name)
validator.check('x dtype', x.dtype(), 'y dtype', y.dtype(), Rel.EQ, self.name, TypeError)
validator.check('x shape', x.shape(), 'y shape', y.shape(), Rel.EQ, self.name)
validator.check_value_type('x', x, Tensor, self.name)
validator.check_value_type('y', y, Tensor, self.name)
validator.check('x dtype', x.dtype, 'y dtype', y.dtype, Rel.EQ, self.name, TypeError)
validator.check('x shape', x.shape, 'y shape', y.shape, Rel.EQ, self.name)
return x
def __infer__(self, x, y):
@@ -187,7 +187,7 @@ class Cast(PrimitiveWithInfer):
def check_elim(self, x, dtype):
if isinstance(x, Tensor):
if x.dtype() == dtype:
if x.dtype == dtype:
return (True, x)
return (False, None)
raise ValueError("Expecting (Tensor, dtype), got : {}".format(inputs))
@@ -498,7 +498,7 @@ class GatherV2(PrimitiveWithInfer):
The original Tensor.
- **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
Specifies the indices of elements of the original Tensor. Must be in the range
`[0, input_param.shape()[axis])`.
`[0, input_param.shape[axis])`.
- **axis** (int) - Specifies the dimension index to gather indices.
Outputs:
@@ -542,7 +542,7 @@ class SparseGatherV2(GatherV2):
The original Tensor.
- **input_indices** (Tensor) - The shape of tensor is :math:`(y_1, y_2, ..., y_S)`.
Specifies the indices of elements of the original Tensor. Must be in the range
`[0, input_param.shape()[axis])`.
`[0, input_param.shape[axis])`.
- **axis** (int) - Specifies the dimension index to gather indices.
Outputs:
@@ -700,7 +700,7 @@ class Split(PrimitiveWithInfer):
output_num (int): The number of output tensors. Default: 1.
Raises:
ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())),
ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)),
or if the output_num is less than or equal to 0, or if the
dimension which to split cannot be evenly divided by output_num.
@@ -1644,7 +1644,7 @@ class Unpack(PrimitiveWithInfer):
A tuple of Tensors, the shape of each objects is same.
Raises:
ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())).
ValueError: If axis is out of the range [-len(input_x.shape), len(input_x.shape)).
Examples:
>>> unpack = P.Unpack()
@@ -1850,7 +1850,7 @@ class StridedSlice(PrimitiveWithInfer):
>>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
>>> slice = P.StridedSlice()
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
>>> output.shape()
>>> output.shape
(1, 1, 3)
>>> output
[[[3, 3, 3]]]
@@ -1974,7 +1974,7 @@ class Diag(PrimitiveWithInfer):
if x is None:
return None
# do constant-folding only when x rank is 1
if len(x.shape()) != 1:
if len(x.shape) != 1:
return None
ret = np.diag(x.asnumpy())
return Tensor(ret)
@@ -2026,7 +2026,7 @@ class DiagPart(PrimitiveWithInfer):
if x is None:
return None
# do constant-folding only when x rank is 2
if len(x.shape()) != 2:
if len(x.shape) != 2:
return None
ret = np.diag(x.asnumpy())
return Tensor(ret)

@@ -2329,8 +2329,8 @@ class NMSWithMask(PrimitiveWithInfer):
def infer_shape(self, bboxes_shape):
cls_name = self.name
validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ, cls_name)
validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT, cls_name)
validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ, cls_name)
validator.check_integer("bboxes.shape[0]", bboxes_shape[0], 0, Rel.GT, cls_name)
validator.check_integer("bboxes.shape[1]", bboxes_shape[1], 5, Rel.EQ, cls_name)
num = bboxes_shape[0]
return (bboxes_shape, (num,), (num,))

@@ -78,7 +78,7 @@ class Flatten(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = P.Flatten()
>>> output = flatten(input_tensor)
>>> assert output.shape() == (1, 24)
>>> assert output.shape == (1, 24)
"""
@prim_attr_register
@@ -840,7 +840,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
>>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)
>>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))
>>> output = depthwise_conv2d(input, weight)
>>> assert output.shape() == (10, 96, 30, 30)
>>> assert output.shape == (10, 96, 30, 30)
"""
@prim_attr_register
@@ -2057,7 +2057,7 @@ class DropoutDoMask(PrimitiveWithInfer):
>>> dropout_do_mask = P.DropoutDoMask()
>>> mask = dropout_gen_mask(shape, keep_prob)
>>> output = dropout_do_mask(x, mask, keep_prob)
>>> assert output.shape() == (20, 16, 50)
>>> assert output.shape == (20, 16, 50)
"""
@prim_attr_register
@@ -2114,7 +2114,7 @@ class ResizeBilinear(PrimitiveWithInfer):
>>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.int32)
>>> resize_bilinear = P.ResizeBilinear((5, 5))
>>> result = resize_bilinear(tensor)
>>> assert result.shape() == (5, 5)
>>> assert result.shape == (5, 5)
"""
@prim_attr_register

@@ -157,8 +157,8 @@ def _to_full_tensor(elem, device_num, global_rank, scaling_sens=None):
data = Tensor(data)
if not isinstance(data, Tensor):
raise ValueError("elements in tensors must be Tensor")
shape_ = data.shape()
type_ = data.dtype()
shape_ = data.shape
type_ = data.dtype
new_shape = ()
batchsize_per_device = 1
for i, item in enumerate(shape_):

@@ -42,17 +42,17 @@ def _special_process_par(par, new_par):
Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.
"""
par_shape_len = len(par.data.shape())
new_par_shape_len = len(new_par.data.shape())
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape()[par_shape_len + delta_i] != 1:
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape())
par.set_parameter_data(Tensor(new_val, par.data.dtype()))
new_val = new_val.reshape(par.data.shape)
par.set_parameter_data(Tensor(new_val, par.data.dtype))
return True
return False
@@ -61,17 +61,17 @@ def _update_param(param, new_param):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.dtype() != new_param.data.dtype():
if param.data.dtype != new_param.data.dtype:
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, param.data.dtype(), new_param.data.dtype()))
.format(param.name, param.data.dtype, new_param.data.dtype))
raise RuntimeError(msg)
if param.data.shape() != new_param.data.shape():
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
.format(param.name, param.data.shape(), new_param.data.shape()))
.format(param.name, param.data.shape, new_param.data.shape))
raise RuntimeError(msg)
return
@@ -79,12 +79,12 @@ def _update_param(param, new_param):
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape() != (1,) and param.data.shape() != ():
if param.data.shape != (1,) and param.data.shape != ():
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
.format(param.name, param.data.shape()))
.format(param.name, param.data.shape))
raise RuntimeError(msg)
param.set_parameter_data(initializer(new_param.data, param.data.shape(), param.data.dtype()))
param.set_parameter_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
@@ -120,12 +120,12 @@ def save_checkpoint(parameter_list, ckpoint_file_name):
param["data"].init_data()
param_data = param["data"].asnumpy().reshape(-1)
param_tensor.tensor_content = param_data.tostring()
param_tensor.tensor_type = str(param["data"].dtype())
param_tensor.tensor_type = str(param["data"].dtype)
if param['data'].shape() == ():
if param['data'].shape == ():
param_tensor.dims.append(0)
else:
for dim in param['data'].shape():
for dim in param['data'].shape:
param_tensor.dims.append(dim)
with open(ckpoint_file_name, "wb") as f:

@@ -73,7 +73,7 @@ class FusedLayerNorm(Cell):
Examples:
>>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
>>> shape1 = x.shape()[1:]
>>> shape1 = x.shape[1:]
>>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
>>> m(x)
"""

@@ -267,21 +267,21 @@ class MobileNetV2(nn.Cell):
if isinstance(m, (nn.Conv2d, DepthwiseConv)):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.set_parameter_data(Tensor(np.random.normal(0, np.sqrt(2. / n),
m.weight.data.shape()).astype("float32")))
m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
elif isinstance(m, nn.BatchNorm2d):
m.gamma.set_parameter_data(
Tensor(np.ones(m.gamma.data.shape(), dtype="float32")))
Tensor(np.ones(m.gamma.data.shape, dtype="float32")))
m.beta.set_parameter_data(
Tensor(np.zeros(m.beta.data.shape(), dtype="float32")))
Tensor(np.zeros(m.beta.data.shape, dtype="float32")))
elif isinstance(m, nn.Dense):
m.weight.set_parameter_data(Tensor(np.random.normal(
0, 0.01, m.weight.data.shape()).astype("float32")))
0, 0.01, m.weight.data.shape).astype("float32")))
if m.bias is not None:
m.bias.set_parameter_data(
Tensor(np.zeros(m.bias.data.shape(), dtype="float32")))
Tensor(np.zeros(m.bias.data.shape, dtype="float32")))
def mobilenet_v2(**kwargs):

Some files were not shown because too many files have changed in this diff.
