!13975 fix mindspore numpy doc errors

From: @yanglf1121
Reviewed-by: @liangchenghui, @kingxian
Signed-off-by: @liangchenghui, @kingxian
pull/13975/MERGE
Committed by mindspore-ci-bot via Gitee, 4 years ago
commit d519b22b76
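Note on the recurring pattern below: most hunks replace comma-separated expected outputs with space-separated ones. `print` on a MindSpore tensor goes through NumPy-style array formatting, which puts spaces, not commas, between elements, so the old doctest outputs never matched what actually prints. A minimal sketch of the expected formatting, assuming a working MindSpore install:

```python
# Minimal sketch, assuming a working MindSpore install: printing a tensor
# uses NumPy-style array formatting: spaces between elements, no commas.
import mindspore.numpy as np

x = np.full((3, 2), [1, 2])
print(x)
# [[1 2]
#  [1 2]
#  [1 2]]
```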

@@ -31,7 +31,7 @@ However, currently MindSpore Parser cannot parse numpy.ndarray in JIT-graph. To
 <table>
 <tr>
-<td style="text-align:center"> 1.1.0 </td> <td style="text-align:center"> 1.2.0 </td>
+<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0-rc1 </td>
 </tr>
 <tr>
 <td>
@@ -64,7 +64,7 @@ Previously, we have incomplete support for keyword arguments `out` and `where` i
 <table>
 <tr>
-<td style="text-align:center"> 1.1.0 </td> <td style="text-align:center"> 1.2.0 </td>
+<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0-rc1 </td>
 </tr>
 <tr>
 <td>
@@ -189,7 +189,7 @@ def construct(self, *inputs):
 <table>
 <tr>
-<td style="text-align:center"> 1.1.0 </td> <td style="text-align:center"> 1.2.0 </td>
+<td style="text-align:center"> 1.1.1 </td> <td style="text-align:center"> 1.2.0-rc1 </td>
 </tr>
 <tr>
 <td>

@@ -1293,14 +1293,14 @@ def meshgrid(*xi, sparse=False, indexing='xy'):
 >>> y = np.linspace(0, 1, 2)
 >>> xv, yv = np.meshgrid(x, y)
 >>> print(xv)
-[[0. , 0.5, 1. ],
-[0. , 0.5, 1. ]]
+[[0. 0.5 1. ]
+[0. 0.5 1. ]]
 >>> print(yv)
-[[0., 0., 0.],
-[1., 1., 1.]]
+[[0. 0. 0.]
+[1. 1. 1.]]
 >>> xv, yv = np.meshgrid(x, y, sparse=True)
 >>> print(xv)
-[[0. , 0.5, 1. ]]
+[[0. 0.5 1. ]]
 >>> print(yv)
 [[0.],
 [1.]
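The sparse branch of the meshgrid example follows NumPy semantics: `sparse=True` returns broadcast-ready grids of shape (1, N) and (M, 1) instead of fully expanded matrices. A plain-NumPy check with the same inputs as the docstring (the definition of `x` sits above the hunk window and is inferred from the printed values):

```python
# Plain-NumPy check of the sparse meshgrid shapes the example relies on;
# x is inferred from the printed grid [[0. 0.5 1. ]] above.
import numpy as onp

x = onp.linspace(0, 1, 3)
y = onp.linspace(0, 1, 2)
xv, yv = onp.meshgrid(x, y, sparse=True)
print(xv.shape, yv.shape)  # (1, 3) (2, 1)
```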
@@ -1426,19 +1426,19 @@ class mGridClass(nd_grid):
 >>> from mindspore.numpy import mgrid
 >>> output = mgrid[0:5, 0:5]
 >>> print(output)
-[[[0, 0, 0, 0, 0],
-[1, 1, 1, 1, 1],
-[2, 2, 2, 2, 2],
-[3, 3, 3, 3, 3],
-[4, 4, 4, 4, 4]],
-[[0, 1, 2, 3, 4],
-[0, 1, 2, 3, 4],
-[0, 1, 2, 3, 4],
-[0, 1, 2, 3, 4],
-[0, 1, 2, 3, 4]]]
+[[[0 0 0 0 0]
+[1 1 1 1 1]
+[2 2 2 2 2]
+[3 3 3 3 3]
+[4 4 4 4 4]]
+[[0 1 2 3 4]
+[0 1 2 3 4]
+[0 1 2 3 4]
+[0 1 2 3 4]
+[0 1 2 3 4]]]
 >>> output = mgrid[-1:1:5j]
 >>> print(output)
-[-1. , -0.5, 0. , 0.5, 1. ]
+[-1. -0.5 0. 0.5 1. ]
 """
 def __init__(self):
 super(mGridClass, self).__init__(sparse=False)
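The `mgrid[-1:1:5j]` line uses NumPy's complex-step convention, which mgrid/ogrid mirror here: an imaginary step of `5j` requests 5 evenly spaced samples with the stop value included, rather than a stride. Plain-NumPy illustration:

```python
# NumPy convention mirrored by mindspore's mgrid: a complex step 5j means
# "5 evenly spaced points, endpoint included", not a stride of 5.
import numpy as onp

print(onp.mgrid[-1:1:5j])  # [-1.  -0.5  0.   0.5  1. ]
```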
@@ -1473,13 +1473,13 @@ class oGridClass(nd_grid):
 [Tensor(shape=[5, 1], dtype=Int32, value=
 [[0],
 [1],
 [2],
 [3],
 [4]]), Tensor(shape=[1, 5], dtype=Int32, value=
 [[0, 1, 2, 3, 4]])]
 >>> output = ogrid[-1:1:5j]
 >>> print(output)
-[-1. , -0.5, 0. , 0.5, 1. ]
+[-1. -0.5 0. 0.5 1. ]
 """
 def __init__(self):
 super(oGridClass, self).__init__(sparse=True)
@@ -1684,10 +1684,10 @@ def ix_(*args):
 >>> import mindspore.numpy as np
 >>> ixgrid = np.ix_(np.array([0, 1]), np.array([2, 4]))
 >>> print(ixgrid)
-[Tensor(shape=[2, 1], dtype=Int32, value=
+(Tensor(shape=[2, 1], dtype=Int32, value=
 [[0],
 [1]]), Tensor(shape=[1, 2], dtype=Int32, value=
-[[2, 4]])]
+[[2, 4]]))
 """
 # TODO boolean mask
 _check_input_tensor(*args)
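The `ix_` hunk swaps the outer square brackets for parentheses because, like NumPy's `ix_`, the function returns a tuple of tensors rather than a list. A hedged check, assuming a working MindSpore install:

```python
# Hedged check: np.ix_ is expected to return a tuple of tensors, which is
# why the corrected doctest output is wrapped in "(...)" instead of "[...]".
import mindspore.numpy as np

ixgrid = np.ix_(np.array([0, 1]), np.array([2, 4]))
print(type(ixgrid))                      # <class 'tuple'>
print(ixgrid[0].shape, ixgrid[1].shape)  # (2, 1) (1, 2)
```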
@@ -1784,8 +1784,9 @@ def indices(dimensions, dtype=mstype.int32, sparse=False):
 ``Ascend`` ``GPU`` ``CPU``
 Examples:
+>>> import mindspore.numpy as np
 >>> grid = np.indices((2, 3))
->>> print(indices)
+>>> print(grid)
 [Tensor(shape=[2, 3], dtype=Int32, value=
 [[0, 0, 0],
 [1, 1, 1]]), Tensor(shape=[2, 3], dtype=Int32, value=

@@ -492,16 +492,14 @@ def column_stack(tup):
 ValueError: If `tup` is empty.
 Examples:
->>> import mindspore.numpy as mnp
->>> import numpy as onp
->>> from mindspore import Tensor
->>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
->>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
->>> output = mnp.column_stack((x1, x2))
+>>> import mindspore.numpy as np
+>>> x1 = np.array([1, 2, 3]).astype('int32')
+>>> x2 = np.array([4, 5, 6]).astype('int32')
+>>> output = np.column_stack((x1, x2))
 >>> print(output)
-[[1, 4],
-[2, 5],
-[3, 6]]
+[[1 4]
+[2 5]
+[3 6]]
 """
 if isinstance(tup, Tensor):
 return tup
@@ -541,15 +539,13 @@ def vstack(tup):
 ValueError: If `tup` is empty.
 Examples:
->>> import mindspore.numpy as mnp
->>> import numpy as onp
->>> from mindspore import Tensor
->>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
->>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
->>> output = mnp.vstack((x1, x2))
+>>> import mindspore.numpy as np
+>>> x1 = np.array([1, 2, 3]).astype('int32')
+>>> x2 = np.array([4, 5, 6]).astype('int32')
+>>> output = np.vstack((x1, x2))
 >>> print(output)
-[[1, 2, 3],
-[4, 5, 6]]
+[[1 2 3]
+[4 5 6]]
 """
 if isinstance(tup, Tensor):
 return tup
@@ -690,12 +686,13 @@ def where(condition, x=None, y=None):
 >>> y = np.full((2, 1, 1), 7)
 >>> output = np.where(condition, x, y)
 >>> print(output)
-[[[7, 5],
-[7, 5],
-[7, 5]],
-[[7, 5],
-[7, 5],
-[7, 5]]]
+[[[7 5]
+[7 5]
+[7 5]]
+
+[[7 5]
+[7 5]
+[7 5]]]
 """
 # type promotes input tensors
 dtype1 = F.dtype(x)
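The blank line added inside the corrected `where` output is deliberate: NumPy's printer separates the sub-matrices of a 3-D array with an empty line, and MindSpore's printed output follows the same convention. A plain-NumPy reconstruction of the example (the `condition` and `x` definitions sit above the hunk window, so their shapes here are an assumption inferred from the printed result):

```python
# Plain-NumPy reconstruction showing the blank line between 3-D sub-matrices;
# the (1, 1, 2) and (1, 3, 2) shapes are assumptions, only y is visible above.
import numpy as onp

condition = onp.full((1, 1, 2), [False, True])
x = onp.full((1, 3, 2), 5)
y = onp.full((2, 1, 1), 7)
print(onp.where(condition, x, y))
# [[[7 5]
#   [7 5]
#   [7 5]]
#
#  [[7 5]
#   [7 5]
#   [7 5]]]
```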
@@ -978,7 +975,7 @@ def unique(x, return_inverse=False):
 >>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
 >>> output_x = np.unique(input_x)
 >>> print(output_x)
-[1, 2, 3, 4, 5]
+[1 2 3 4 5]
 >>> output_x = np.unique(input_x, return_inverse=True)
 >>> print(output_x)
 (Tensor(shape=[5], dtype=Int32, value= [ 1, 2, 3, 4, 5]), Tensor(shape=[7], dtype=Int32,
@@ -1055,7 +1052,7 @@ def roll(a, shift, axis=None):
 Tensor, with the same shape as a.
 Supported Platforms:
-``GPU``
+``Ascend`` ``GPU`` ``CPU``
 Raises:
 TypeError: If input arguments have types not specified above.

@@ -57,8 +57,8 @@ def not_equal(x1, x2, dtype=None):
 >>> a = np.asarray([1, 2])
 >>> b = np.asarray([[1, 3],[1, 4]])
 >>> print(np.not_equal(a, b))
->>> [[False True]
-[False True]]
+[[False True]
+[False True]]
 """
 _check_input_tensor(x1, x2)
 return _apply_tensor_op(F.not_equal, x1, x2, dtype=dtype)
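The `not_equal` fix removes a stray `>>> ` prompt from the expected output line. Under doctest rules, anything prefixed with the prompt is executed as input, so a prompt on an output line makes the example fail. A stdlib-only illustration of the general rule (not MindSpore-specific):

```python
# General doctest behavior: expected output must not carry a ">>> " prompt,
# otherwise the runner executes it as another statement and the test fails.
import doctest

def good():
    """
    >>> print(1 == 2)
    False
    """

def bad():
    """
    >>> print(1 == 2)
    >>> False
    """

doctest.run_docstring_examples(good, {})  # silent: the example passes
doctest.run_docstring_examples(bad, {})   # prints failure reports
```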
@@ -253,9 +253,6 @@ def isfinite(x, dtype=None):
 >>> output = np.isfinite(np.array([np.inf, 1., np.nan]).astype('float32'))
 >>> print(output)
 [False True False]
->>> output = np.isfinite(np.log(np.array(-1.).astype('float32')))
->>> print(output)
-False
 """
 return _apply_tensor_op(F.isfinite, x, dtype=dtype)

@@ -258,9 +258,9 @@ def add(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4])
 >>> output = np.add(x1, x2)
 >>> print(output)
-[[4, 6],
-[4, 6],
-[4, 6]]
+[[4 6]
+[4 6]
+[4 6]]
 """
 # broadcast is not fully supported in tensor_add on CPU,
 # so we use tensor_sub as a substitute solution
@@ -297,9 +297,9 @@ def subtract(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4])
 >>> output = np.subtract(x1, x2)
 >>> print(output)
-[[-2, -2],
-[-2, -2],
-[-2, -2]]
+[[-2 -2]
+[-2 -2]
+[-2 -2]]
 """
 return _apply_tensor_op(F.tensor_sub, x1, x2, dtype=dtype)
@@ -331,9 +331,9 @@ def multiply(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4])
 >>> output = np.multiply(x1, x2)
 >>> print(output)
-[[3, 8],
-[3, 8],
-[3, 8]]
+[[3 8]
+[3 8]
+[3 8]]
 """
 if _get_device() == 'CPU':
 _check_input_tensor(x1, x2)
@@ -374,9 +374,9 @@ def divide(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4])
 >>> output = np.divide(x1, x2)
 >>> print(output)
-[[0.33333333, 0.5],
-[0.33333333, 0.5],
-[0.33333333, 0.5]]
+[[0.33333334 0.5 ]
+[0.33333334 0.5 ]
+[0.33333334 0.5 ]]
 """
 if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
 x1 = F.cast(x1, mstype.float32)
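The changed quotient digits in `divide` (and `true_divide` below) are a precision fix, not a typo: 1/3 has no exact binary representation, the function casts integer inputs to float32 (visible in the context lines), and the float32 nearest to 1/3 prints as 0.33333334. Plain-NumPy check:

```python
# Plain-NumPy check of the corrected digits: the float32 closest to 1/3
# rounds up in its last place, so it prints as 0.33333334, not ...33.
import numpy as onp

print(onp.float32(1) / onp.float32(3))  # 0.33333334
print(onp.float64(1) / onp.float64(3))  # 0.3333333333333333
```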
@@ -413,9 +413,9 @@ def true_divide(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4])
 >>> output = np.true_divide(x1, x2)
 >>> print(output)
-[[0.33333333, 0.5],
-[0.33333333, 0.5],
-[0.33333333, 0.5]]
+[[0.33333334 0.5 ]
+[0.33333334 0.5 ]
+[0.33333334 0.5 ]]
 """
 return divide(x1, x2, dtype=dtype)
@@ -450,9 +450,9 @@ def power(x1, x2, dtype=None):
 >>> x2 = np.full((3, 2), [3, 4]).astype('float32')
 >>> output = np.power(x1, x2)
 >>> print(output)
-[[ 1, 16],
-[ 1, 16],
-[ 1, 16]]
+[[ 1 16]
+[ 1 16]
+[ 1 16]]
 """
 return _apply_tensor_op(F.tensor_pow, x1, x2, dtype=dtype)
@@ -708,8 +708,8 @@ def dot(a, b):
 >>> b = np.full((2, 3, 4), 5).astype('float32')
 >>> output = np.dot(a, b)
 >>> print(output)
-[[[105, 105, 105, 105],
-[105, 105, 105, 105]]]
+[[[105. 105. 105. 105.]
+[105. 105. 105. 105.]]]
 """
 ndim_a, ndim_b = F.rank(a), F.rank(b)
 if ndim_a > 0 and ndim_b >= 2:
@@ -760,13 +760,13 @@ def outer(a, b):
 >>> b = np.full(4, 3).astype('float32')
 >>> output = np.outer(a, b)
 >>> print(output)
-[[6, 6, 6, 6],
-[6, 6, 6, 6],
-[6, 6, 6, 6],
-[6, 6, 6, 6],
-[6, 6, 6, 6],
-[6, 6, 6, 6],
-[6, 6, 6, 6]]
+[[6. 6. 6. 6.]
+[6. 6. 6. 6.]
+[6. 6. 6. 6.]
+[6. 6. 6. 6.]
+[6. 6. 6. 6.]
+[6. 6. 6. 6.]
+[6. 6. 6. 6.]]
 """
 _check_input_tensor(a, b)
 if F.rank(a) != 1:
@@ -1478,7 +1478,7 @@ def amin(a, axis=None, keepdims=False, initial=None, where=True):
 [0. 1.]
 >>> output = np.amin(a, axis=1)
 >>> print(output)
-[0, 2]
+[0. 2.]
 >>> output = np.amin(a, where=np.array([False, True]), initial=10, axis=0)
 >>> print(output)
 [10. 1.]
@@ -3733,7 +3733,7 @@ def promote_types(type1, type2):
 >>> import mindspore.numpy as np
 >>> output = np.promote_types(np.float32, np.float64)
 >>> print(output)
-np.float64
+Float64
 """
 type1 = _check_dtype(type1)
 type2 = _check_dtype(type2)
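The `promote_types` correction reflects that the function here returns a MindSpore dtype object, and MindSpore dtypes stringify with capitalized names (`Float64`) rather than as the NumPy attribute spelling `np.float64`. A hedged sketch, assuming a working install:

```python
# Hedged sketch: mindspore dtype objects print with capitalized names,
# e.g. "Float64", which is what the corrected doctest now expects.
import mindspore.numpy as np

output = np.promote_types(np.float32, np.float64)
print(output)  # expected: Float64
```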
