From 0bf0862112ca2b5f2f01a26c6bedcf9bb9ede988 Mon Sep 17 00:00:00 2001
From: lihongkang <lihongkang1@huawei.com>
Date: Thu, 15 Oct 2020 20:28:01 +0800
Subject: [PATCH] fix bugs

---
 mindspore/nn/layer/activation.py              |  4 ++-
 mindspore/nn/layer/image.py                   |  9 ++++---
 .../ops/_op_impl/tbe/sparse_apply_ftrl_d.py   |  6 +++++
 mindspore/ops/operations/array_ops.py         | 19 ++++++++++++--
 mindspore/ops/operations/math_ops.py          | 26 ++++++++++++-------
 mindspore/ops/operations/nn_ops.py            | 17 +++++++-----
 6 files changed, 58 insertions(+), 23 deletions(-)

diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index 01d8720feb..2ff36e3771 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -382,9 +382,11 @@ class PReLU(Cell):
         Tensor, with the same type and shape as the `input_data`.
 
     Examples:
-        >>> input_x = Tensor(np.random.rand(1, 10, 4, 4), mindspore.float32)
+        >>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
         >>> prelu = nn.PReLU()
         >>> prelu(input_x)
+        [[[[0.1 0.6]
+           [0.9 0.9]]]]
     """
 
     @cell_attr_register(attrs="")
diff --git a/mindspore/nn/layer/image.py b/mindspore/nn/layer/image.py
index cfafd8b5d4..500ed8da80 100644
--- a/mindspore/nn/layer/image.py
+++ b/mindspore/nn/layer/image.py
@@ -197,8 +197,8 @@ class SSIM(Cell):
     Args:
         max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
             Default: 1.0.
-        filter_size (int): The size of the Gaussian filter. Default: 11.
-        filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5.
+        filter_size (int): The size of the Gaussian filter. Default: 11. The value must be greater than or equal to 1.
+        filter_sigma (float): The standard deviation of the Gaussian kernel. Default: 1.5. The value must be greater than 0.
         k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
         k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.
@@ -211,9 +211,10 @@ class SSIM(Cell):
     Examples:
         >>> net = nn.SSIM()
-        >>> img1 = Tensor(np.random.random((1,3,16,16)))
-        >>> img2 = Tensor(np.random.random((1,3,16,16)))
+        >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
+        >>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
         >>> ssim = net(img1, img2)
+        [0.12174469]
     """
 
     def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
         super(SSIM, self).__init__()
diff --git a/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py b/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py
index 2a9a8f3175..e28134f363 100644
--- a/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py
+++ b/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py
@@ -42,6 +42,12 @@ sparse_apply_ftrl_d_op_info = TBERegOp("SparseApplyFtrl") \
                   DataType.I32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC) \
     .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
                   DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW,
+                  DataType.I64_NCHW, DataType.F32_NCHW, DataType.F32_NCHW, DataType.F32_NCHW) \
+    .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC,
+                  DataType.I64_NHWC, DataType.F32_NHWC, DataType.F32_NHWC, DataType.F32_NHWC) \
+    .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
+                  DataType.I64_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
     .get_op_info()
 
 
diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py
index 95af441570..fac6a3038a 100644
--- a/mindspore/ops/operations/array_ops.py
+++ b/mindspore/ops/operations/array_ops.py
@@ -196,6 +196,8 @@ class SameTypeShape(PrimitiveWithInfer):
         >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
         >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
         >>> out = P.SameTypeShape()(input_x, input_y)
+        [[2. 2.]
+         [2. 2.]]
     """
 
     @prim_attr_register
@@ -383,6 +385,9 @@ class Reshape(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
         >>> reshape = P.Reshape()
         >>> output = reshape(input_tensor, (3, 2))
+        [[-0.1  0.3]
+         [ 3.6  0.4]
+         [ 0.5 -3.2]]
     """
 
     @prim_attr_register
@@ -553,7 +558,7 @@ class Transpose(PrimitiveWithInfer):
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
         - **input_perm** (tuple[int]) - The permutation to be converted. The input tuple is constructed by multiple
           indexes. The length of `input_perm` and the shape of `input_x` must be the same. Only constant value is
-          allowed.
+          allowed. Must be in the range [0, rank(input_x)).
 
     Outputs:
         Tensor, the type of output tensor is the same as `input_x` and the shape of output tensor is decided by the
@@ -564,6 +569,12 @@ class Transpose(PrimitiveWithInfer):
         >>> perm = (0, 2, 1)
         >>> transpose = P.Transpose()
         >>> output = transpose(input_tensor, perm)
+        [[[ 1.  4.]
+          [ 2.  5.]
+          [ 3.  6.]]
+         [[ 7. 10.]
+          [ 8. 11.]
+          [ 9. 12.]]]
     """
 
     @prim_attr_register
@@ -1904,6 +1915,7 @@ class Slice(PrimitiveWithInfer):
         >>>                   [[3, 3, 3], [4, 4, 4]],
         >>>                   [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
         >>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3))
+        [[[3 3 3]]]
     """
 
     @prim_attr_register
@@ -2049,6 +2061,7 @@ class Select(PrimitiveWithInfer):
         >>> input_x = Tensor([2,3], mindspore.float32)
         >>> input_y = Tensor([1,2], mindspore.float32)
         >>> select(input_cond, input_x, input_y)
+        [2. 2.]
     """
 
     @prim_attr_register
@@ -2513,9 +2526,11 @@ class ResizeNearestNeighbor(PrimitiveWithInfer):
         Tensor, the shape of the output tensor is :math:`(N, C, NEW\_H, NEW\_W)`.
 
     Examples:
-        >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
+        >>> input_tensor = Tensor(np.array([[[[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]]]), mindspore.float32)
         >>> resize = P.ResizeNearestNeighbor((2, 2))
        >>> output = resize(input_tensor)
+        [[[[-0.1  0.3]
+           [ 0.4  0.5]]]]
     """
 
     @prim_attr_register
diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py
index a368192f01..eac243e898 100644
--- a/mindspore/ops/operations/math_ops.py
+++ b/mindspore/ops/operations/math_ops.py
@@ -326,7 +326,7 @@ class ReduceMean(_Reduce):
     Inputs:
         - **input_x** (Tensor[Number]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, has the same dtype as the `input_x`.
@@ -342,6 +342,8 @@ class ReduceMean(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMean(keep_dims=True)
         >>> output = op(input_x, 1)
+        >>> output.shape
+        (3, 1, 5, 6)
     """
 
 
@@ -358,7 +360,7 @@ class ReduceSum(_Reduce):
     Inputs:
         - **input_x** (Tensor[Number]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, has the same dtype as the `input_x`.
@@ -374,6 +376,8 @@ class ReduceSum(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceSum(keep_dims=True)
         >>> output = op(input_x, 1)
+        >>> output.shape
+        (3, 1, 5, 6)
     """
 
     @prim_attr_register
@@ -397,7 +401,7 @@ class ReduceAll(_Reduce):
     Inputs:
         - **input_x** (Tensor[bool]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, the dtype is bool.
@@ -435,7 +439,7 @@ class ReduceAny(_Reduce):
     Inputs:
         - **input_x** (Tensor[bool]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, the dtype is bool.
@@ -473,7 +477,7 @@ class ReduceMax(_Reduce):
     Inputs:
         - **input_x** (Tensor[Number]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, has the same dtype as the `input_x`.
@@ -514,7 +518,7 @@ class ReduceMin(_Reduce):
     Inputs:
         - **input_x** (Tensor[Number]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, has the same dtype as the `input_x`.
@@ -530,6 +534,8 @@ class ReduceMin(_Reduce):
         >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
         >>> op = P.ReduceMin(keep_dims=True)
         >>> output = op(input_x, 1)
+        >>> output.shape
+        (3, 1, 5, 6)
     """
 
 
@@ -547,7 +553,7 @@ class ReduceProd(_Reduce):
     Inputs:
         - **input_x** (Tensor[Number]) - The input tensor.
         - **axis** (Union[int, tuple(int), list(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
-          Only constant value is allowed.
+          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
         Tensor, has the same dtype as the `input_x`.
@@ -3270,11 +3276,11 @@ class SquareSumAll(PrimitiveWithInfer):
         - **output_y2** (Tensor) - The same type as the `input_x1`.
 
     Examples:
-        >>> input_x1 = Tensor(np.random.randint([3, 2, 5, 7]), mindspore.float32)
-        >>> input_x2 = Tensor(np.random.randint([3, 2, 5, 7]), mindspore.float32)
+        >>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
+        >>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
         >>> square_sum_all = P.SquareSumAll()
         >>> square_sum_all(input_x1, input_x2)
-        (27, 26)
+        (4, 20)
     """
 
     @prim_attr_register
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 2de7bd8a4e..31b1a8ce78 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -2500,7 +2500,8 @@ class ResizeBilinear(PrimitiveWithInfer):
     can be represented by different data types, but the data types of output images are always float32.
 
     Args:
-        size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size of the images.
+        size (Union[tuple[int], list[int]]): A tuple or list of 2 int elements `(new_height, new_width)`, the new size
+            of the images.
         align_corners (bool): If true, rescale input by `(new_height - 1) / (height - 1)`, which exactly aligns
             the 4 corners of images and resized images. If false, rescale by `new_height / height`. Default: False.
 
@@ -2521,7 +2522,7 @@ class ResizeBilinear(PrimitiveWithInfer):
 
     @prim_attr_register
     def __init__(self, size, align_corners=False):
-        pass
+        validator.check_value_type("size", size, [tuple, list], self.name)
 
     def infer_shape(self, input_shape):
         validator.check("input shape rank", len(input_shape), "", 4, Rel.EQ, self.name)
@@ -2857,10 +2858,12 @@ class SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):
         Tensor, with the same shape and type as input `logits`.
 
     Examples:
-        >>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))
-        >>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))
+        >>> logits = Tensor(np.array([[-0.8, 1.2, 0.7], [-0.1, -0.4, 0.7]]).astype(np.float16))
+        >>> labels = Tensor(np.array([[0.3, 0.8, 1.2], [-0.6, 0.1, 2.2]]).astype(np.float16))
         >>> sigmoid = P.SigmoidCrossEntropyWithLogits()
         >>> sigmoid(logits, labels)
+        [[ 0.6113  0.5034  0.263 ]
+         [ 0.5845  0.553  -0.4365]]
     """
 
     @prim_attr_register
@@ -3043,7 +3046,8 @@ class ROIAlign(PrimitiveWithInfer):
         >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)
         >>> roi_align = P.ROIAlign(2, 2, 0.5, 2)
         >>> output_tensor = roi_align(input_tensor, rois)
-        >>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)
+        [[[[1.77499998e+00, 2.02500010e+00],
+           [2.27500010e+00, 2.52500010e+00]]]]
     """
 
     @prim_attr_register
@@ -3062,6 +3066,7 @@ class ROIAlign(PrimitiveWithInfer):
         self.roi_end_mode = roi_end_mode
 
     def infer_shape(self, inputs_shape, rois_shape):
+        validator.check("input shape rank", len(inputs_shape), "", 4, Rel.LE, self.name)
         return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]
 
     def infer_dtype(self, inputs_type, rois_type):
@@ -5162,7 +5167,7 @@ class SparseApplyFtrl(PrimitiveWithCheck):
         args = {"var_dtype": var_dtype, "accum_dtype": accum_dtype, "linear_dtype": linear_dtype,
                 "grad_dtype": grad_dtype}
         validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)
-        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32], self.name)
+        validator.check_tensor_type_same({"indices_dtype": indices_dtype}, [mstype.int32, mstype.int64], self.name)
 
 
 class SparseApplyFtrlV2(PrimitiveWithInfer):
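
Reviewer note (not part of the patch): a minimal, untested sketch of how the two
behavioural changes above surface to users -- SparseApplyFtrl accepting int64
`indices` (matching the new I64 TBE registrations), and ResizeBilinear validating
that `size` is a tuple or list. The wrapper Cell, hyper-parameters, and shapes
below are arbitrary illustrative choices, not values taken from the patch.

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P

class FtrlNet(nn.Cell):
    """Thin wrapper so SparseApplyFtrl runs on its own parameters."""
    def __init__(self):
        super(FtrlNet, self).__init__()
        # lr/l1/l2/lr_power are arbitrary example values.
        self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="linear")

    def construct(self, grad, indices):
        return self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)

grad = Tensor(np.random.rand(2, 3).astype(np.float32))
# int64 indices were rejected by the dtype check before this patch.
indices = Tensor(np.array([0, 2]), mindspore.int64)
FtrlNet()(grad, indices)

# `size` may now be a list as well as a tuple; a non-sequence `size` raises a
# TypeError from the new check_value_type call instead of failing later.
resize = P.ResizeBilinear([5, 5])
images = Tensor(np.random.rand(1, 1, 3, 3).astype(np.float32))
print(resize(images).shape)  # expected: (1, 1, 5, 5)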