Add new np interfaces

pull/12726/head
yanglf1121 4 years ago
parent a1c3f55aca
commit 72b365c24b

@@ -30,13 +30,14 @@ from .array_ops import (transpose, expand_dims, squeeze, rollaxis, swapaxes, res
ravel, concatenate, where, atleast_1d, atleast_2d, atleast_3d,
column_stack, hstack, dstack, vstack, stack, unique, moveaxis,
tile, broadcast_to, broadcast_arrays, roll, append, split, vsplit,
flip, flipud, fliplr, hsplit, dsplit, take_along_axis, take, repeat)
flip, flipud, fliplr, hsplit, dsplit, take_along_axis, take, repeat,
rot90, select, array_split)
from .array_creations import copy_ as copy
from .array_creations import (array, asarray, asfarray, ones, zeros, full, arange,
linspace, logspace, eye, identity, empty, empty_like,
ones_like, zeros_like, full_like, diagonal, tril, triu,
tri, trace, meshgrid, mgrid, ogrid, diagflat,
diag, diag_indices, ix_)
diag, diag_indices, ix_, indices, geomspace, vander)
from .dtypes import (int_, int8, int16, int32, int64, uint, uint8, uint16,
uint32, uint64, float_, float16, float32, float64, bool_, inf, nan,
numeric_types, PINF, NINF)
@@ -45,35 +46,51 @@ from .math_ops import (mean, inner, add, subtract, multiply, divide, true_divide
matmul, square, sqrt, reciprocal, log, maximum, heaviside, amax, amin,
hypot, float_power, floor, ptp, deg2rad, rad2deg, count_nonzero,
positive, negative, clip, floor_divide, remainder, fix, fmod, trunc,
exp, expm1, cumsum)
exp, expm1, exp2, kron, promote_types, divmod_, diff, cbrt,
cross, ceil, trapz, gcd, lcm, convolve, log1p, logaddexp, log2,
logaddexp2, log10, ediff1d, nansum, nanmean, nanvar, nanstd, cumsum, nancumsum,
sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh,
arctanh, arctan2, cov)
from .logic_ops import (not_equal, less_equal, less, greater_equal, greater, equal, isfinite,
isnan, isinf, isposinf, isneginf, isscalar)
isnan, isinf, isposinf, isneginf, isscalar, logical_and, logical_not,
logical_or, logical_xor, in1d, isin, isclose)
mod = remainder
fabs = absolute
divmod = divmod_ # pylint: disable=redefined-builtin
abs = absolute # pylint: disable=redefined-builtin
max = amax # pylint: disable=redefined-builtin
min = amin # pylint: disable=redefined-builtin
array_ops_module = ['transpose', 'expand_dims', 'squeeze', 'rollaxis', 'swapaxes', 'reshape',
'ravel', 'concatenate', 'where', 'atleast_1d', 'atleast_2d', 'atleast_3d',
'column_stack', 'hstack', 'dstack', 'vstack', 'stack', 'unique', 'moveaxis',
'tile', 'broadcast_to', 'broadcast_arrays', 'append', 'roll', 'split', 'vsplit',
'flip', 'flipud', 'fliplr', 'hsplit', 'dsplit', 'take_along_axis', 'take',
'repeat']
'repeat', 'rot90', 'select', 'array_split']
array_creations_module = ['array', 'asarray', 'asfarray', 'ones', 'zeros', 'full', 'arange',
'linspace', 'logspace', 'eye', 'identity', 'empty', 'empty_like',
'ones_like', 'zeros_like', 'full_like', 'diagonal', 'tril', 'triu',
'tri', 'trace', 'meshgrid', 'mgrid', 'ogrid', 'diagflat', 'diag',
'diag_indices', 'ix_', 'cumsum']
'diag_indices', 'ix_', 'indices', 'geomspace', 'vander']
math_module = ['mean', 'inner', 'add', 'subtract', 'multiply', 'divide', 'true_divide', 'power',
'dot', 'outer', 'tensordot', 'absolute', 'std', 'var', 'average', 'not_equal',
'minimum', 'matmul', 'square', 'sqrt', 'reciprocal', 'log', 'maximum',
'heaviside', 'amax', 'amin', 'hypot', 'float_power', 'floor', 'ptp', 'deg2rad',
'rad2deg', 'count_nonzero', 'positive', 'negative', 'clip', 'floor_divide',
'remainder', 'mod', 'fix', 'fmod', 'trunc', 'exp', 'expm1', 'fabs', 'cumsum']
'remainder', 'mod', 'fix', 'fmod', 'trunc', 'exp', 'expm1', 'fabs', 'exp2', 'kron',
'promote_types', 'divmod', 'diff', 'cbrt', 'cross', 'ceil', 'trapz',
'abs', 'max', 'min', 'gcd', 'lcm', 'log1p', 'logaddexp', 'log2', 'logaddexp2', 'log10',
'convolve', 'ediff1d', 'nansum', 'nanmean', 'nanvar', 'nanstd', 'cumsum',
'nancumsum', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'sinh', 'cosh', 'tanh',
'arcsinh', 'arccosh', 'arctanh', 'arctan2', 'cov']
logic_module = ['not_equal', 'less_equal', 'less', 'greater_equal', 'greater', 'equal', 'isfinite',
'isnan', 'isinf', 'isposinf', 'isneginf', 'isscalar']
'isnan', 'isinf', 'isposinf', 'isneginf', 'isscalar', 'logical_and', 'logical_not',
'logical_or', 'logical_xor', 'in1d', 'isin', 'isclose']
__all__ = array_ops_module + array_creations_module + math_module + logic_module + numeric_types
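
For orientation, a minimal usage sketch of a few of the newly exported interfaces, written against plain NumPy since mindspore.numpy mirrors its semantics (the mnp versions return Tensors rather than ndarrays):

import numpy as np  # stand-in for mindspore.numpy (mnp), which mirrors this API

a = np.arange(6).reshape(2, 3)
print(np.rot90(a))                          # rotate 90 degrees counter-clockwise
print(np.select([a > 3, a <= 3], [a, -a]))  # elementwise pick by first true condition
print(np.array_split(np.arange(7), 3))      # uneven split into sizes 3, 2, 2
print(np.geomspace(1, 256, num=9))          # geometric progression 1, 2, 4, ..., 256
print(np.vander(np.array([1, 2, 3])))       # Vandermonde matrix, decreasing powers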

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -169,3 +169,16 @@ promotion_rule = {
(bool_, float32): float32,
(bool_, float64): float64,
}
rule_for_trigonometric = {float16: float16,
float32: float32,
float64: float64,
int8: float16,
int16: float32,
int32: float32,
int64: float32,
uint8: float16,
uint16: float32,
uint32: float32,
uint64: float32,
bool_: float16}

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -13,14 +13,11 @@
# limitations under the License.
# ============================================================================
"""internal utility functions"""
import numpy as onp
from ..common import Tensor
from ..ops import functional as F
from ..common import dtype as mstype
from .utils_const import _tile_size, _add_unit_axes, _raise_type_error
from .utils_const import _tile_size, _add_unit_axes, _raise_type_error, _type_convert
def _deep_list(array_like):
@@ -56,9 +53,8 @@ def _deep_tensor_to_nparray(array_like):
def _check_input_for_asarray(array_like):
"""check whether array_like argument is a valid type for np.asarray conversion"""
if not isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):
_raise_type_error("input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`" + \
"or numpy.ndarray, but got ", array_like)
if not isinstance(array_like, (Tensor, list, tuple, int, float, bool)):
_raise_type_error("input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`, but got ", array_like)
def _is_scalar(shape):
@@ -121,6 +117,20 @@ def _convert_64_to_32(tensor):
return tensor
def _to_tensor(*args):
"""Returns each input as Tensor"""
res = ()
for arg in args:
if isinstance(arg, (int, float, bool, list, tuple)):
arg = _convert_64_to_32(_type_convert(Tensor, arg))
elif not isinstance(arg, Tensor):
_raise_type_error("Expect input to be array like.")
res += (arg,)
if len(res) == 1:
return res[0]
return res
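
A runnable pure-NumPy analogue of `_to_tensor`, assuming `_type_convert` builds a Tensor and `_convert_64_to_32` narrows 64-bit dtypes as their names suggest:

import numpy as onp

def to_tensor_sketch(*args):
    """Mirror of _to_tensor: convert array-likes, narrow 64-bit dtypes to 32-bit."""
    res = ()
    for arg in args:
        if not isinstance(arg, (int, float, bool, list, tuple, onp.ndarray)):
            raise TypeError("Expect input to be array like.")
        arr = onp.asarray(arg)
        if arr.dtype == onp.float64:
            arr = arr.astype(onp.float32)  # mirrors _convert_64_to_32
        elif arr.dtype == onp.int64:
            arr = arr.astype(onp.int32)
        res += (arr,)
    return res[0] if len(res) == 1 else res  # a single input is unwrapped

print(to_tensor_sketch([1.0, 2.0]).dtype)    # float32
print(to_tensor_sketch(1, [2, 3])[1].dtype)  # int32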
def _get_dtype_from_scalar(*input_numbers):
"""
Get the final dtype from series of input numbers, compared with F.typeof, we
@@ -139,3 +149,8 @@ def _get_dtype_from_scalar(*input_numbers):
if int_flag:
return mstype.int32
return mstype.float32
def _isnan(x):
"""Computes isnan."""
return F.not_equal(x, x)
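
`_isnan` relies on the IEEE-754 rule that NaN is the only value unequal to itself, so `F.not_equal(x, x)` is true exactly at NaN positions. The same identity in plain NumPy:

import numpy as onp

x = onp.array([1.0, onp.nan, onp.inf])
print(x != x)        # [False  True False]
print(onp.isnan(x))  # same mask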

@@ -14,7 +14,8 @@
# ============================================================================
"""internal graph-compatible utility functions"""
import math
from functools import partial
from itertools import zip_longest
from collections import deque
import mindspore.context as context
from ..ops import functional as F
@@ -24,7 +25,7 @@ from ..common import Tensor
from .._c_expression import Tensor as Tensor_
from .._c_expression import typing
from .dtypes import promotion_rule, dtype_tuple, all_types, dtype_map
from .dtypes import promotion_rule, dtype_tuple, all_types, dtype_map, rule_for_trigonometric
@constexpr
@@ -110,44 +111,19 @@ def _get_device():
return context.get_context('device_target')
@constexpr
def _reverse_index(idx, arr):
"""
Returns the element of `arr` at position `idx` counted from the end, or 1
if `arr` has fewer than `idx + 1` entries. A result of 1 covers the two
situations in which that dimension is broadcastable:
- 1. the tensor's shape is 1 at the designated dimension, or
- 2. the tensor has fewer dimensions than `idx` (the shape is indexed in
reverse).
"""
if len(arr) <= idx:
return 1
return arr[-1 - idx]
@constexpr
def _infer_out_shape(*shapes):
"""
Returns shape of output after broadcasting
Raises ValueError if shape1 and shape2 cannot be broadcast
Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
"""
shapes_unbroadcastable = False
ndim_max = max(map(len, shapes))
shape_out = [0]*ndim_max
i = 0
for i in range(ndim_max):
shape_out[-1 - i] = max(map(partial(_reverse_index, i), shapes))
for shape in shapes:
if _reverse_index(i, shape) != shape_out[-1 - i]:
if _reverse_index(i, shape) != 1:
shapes_unbroadcastable = True
break
if shapes_unbroadcastable:
break
if not shapes_unbroadcastable:
return tuple(shape_out)
shape_out = deque()
reversed_shapes = map(reversed, shapes)
for items in zip_longest(*reversed_shapes, fillvalue=1):
max_size = 0 if 0 in items else max(items)
if any(item not in (1, max_size) for item in items):
raise ValueError(f'operands could not be broadcast together with shapes {*shapes,}')
shape_out.appendleft(max_size)
return tuple(shape_out)
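
The rewrite walks all shapes right-to-left with `zip_longest`, padding missing leading dimensions with 1; at each position every size must be 1 or the common maximum (with 0 propagating), which is exactly NumPy's broadcasting rule. A standalone sketch of the same loop outside graph mode:

from itertools import zip_longest
from collections import deque

def infer_out_shape_sketch(*shapes):
    """Plain-Python copy of the new _infer_out_shape logic."""
    shape_out = deque()
    for items in zip_longest(*map(reversed, shapes), fillvalue=1):
        max_size = 0 if 0 in items else max(items)
        if any(item not in (1, max_size) for item in items):
            raise ValueError(f'operands could not be broadcast together with shapes {*shapes,}')
        shape_out.appendleft(max_size)
    return tuple(shape_out)

print(infer_out_shape_sketch((2, 3, 4), (3, 1), (1, 4)))  # (2, 3, 4)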
@constexpr
@@ -228,6 +204,21 @@ def _raise_value_error(info, param=None):
raise ValueError(info + f"{param}")
@constexpr
def _raise_runtime_error(info, param=None):
"""
Raise RuntimeError in both graph/pynative mode
Args:
info(str): info string to display
param(python obj): any object that can be recognized by graph mode. If it is
not None, then param's value information will be extracted and displayed.
Default is None.
"""
if param is None:
raise RuntimeError(info)
raise RuntimeError(info + f"{param}")
@constexpr
def _empty(dtype, shape):
"""Returns an uninitialized array with dtype and shape."""
@@ -242,6 +233,9 @@ def _promote(dtype1, dtype2):
return promotion_rule[dtype1, dtype2]
return promotion_rule[dtype2, dtype1]
@constexpr
def _promote_for_trigonometric(dtype):
return rule_for_trigonometric[dtype]
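
`rule_for_trigonometric` (added to dtypes above) maps integer and bool inputs to a float compute type, since the trigonometric kernels are only defined on floats; float inputs pass through unchanged. A NumPy mirror of the lookup:

import numpy as onp

# NumPy mirror of rule_for_trigonometric: ints/bools promote to floats
rule_sketch = {onp.int8: onp.float16, onp.uint8: onp.float16, onp.bool_: onp.float16,
               onp.int16: onp.float32, onp.int32: onp.float32, onp.int64: onp.float32,
               onp.float16: onp.float16, onp.float32: onp.float32, onp.float64: onp.float64}

x = onp.arange(3, dtype=onp.int32)
y = onp.sin(x.astype(rule_sketch[x.dtype.type]))  # compute in float32
print(y.dtype)                                    # float32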
@constexpr
def _max(*args):
@@ -315,7 +309,7 @@ def _canonicalize_axis(axis, ndim):
axis = tuple([canonicalizer(axis) for axis in axis])
if all(axis.count(el) <= 1 for el in axis):
return axis if len(axis) > 1 else axis[0]
return tuple(sorted(axis)) if len(axis) > 1 else axis[0]
raise ValueError(f"duplicate axes in {axis}.")
@@ -426,13 +420,37 @@ def _tuple_getitem(tup, idx, startswith=True):
@constexpr
def _iota(dtype, num):
def _tuple_setitem(tup, idx, value):
"""
Returns a tuple with specified `idx` set to `value`.
"""
tup = list(tup)
tup[idx] = value
return tuple(tup)
@constexpr
def _iota(dtype, num, increasing=True):
"""Creates a 1-D tensor with value: [0,1,...num-1] and dtype."""
# TODO: Change to P.Linspace when the kernel is implemented on CPU.
if increasing:
return Tensor(list(range(int(num))), dtype)
return Tensor(list(range(int(num)-1, -1, -1)), dtype)
@constexpr
def _ceil(number):
"""Ceils the number in graph mode."""
return math.ceil(number)
@constexpr
def _seq_prod(seq1, seq2):
"""Returns the element-wise product of seq1 and seq2."""
return tuple(map(lambda x, y: x*y, seq1, seq2))
@constexpr
def _make_tensor(val, dtype):
""" Returns the tensor with value `val` and dtype `dtype`."""
return Tensor(val, dtype)

@@ -15,6 +15,7 @@
"""Implementation for internal polymorphism `not equal` operations."""
from . import _constexpr_utils as const_utils
from ...composite import base
from ... import functional as F
@@ -41,6 +42,21 @@ def _not_equal_scalar(x, y):
return not F.scalar_eq(x, y)
@not_equal.register("mstype", "mstype")
def _not_equal_mstype(x, y):
"""
Determine if two mindspore types are not equal.
Args:
x (mstype): first input mindspore type.
y (mstype): second input mindspore type.
Returns:
bool, True if x != y, False otherwise.
"""
return not const_utils.mstype_eq(x, y)
@not_equal.register("String", "String")
def _not_equal_string(x, y):
"""

@@ -77,6 +77,7 @@ floormod = tensor_mod
tensor_exp = P.Exp()
exp = tensor_exp
tensor_expm1 = P.Expm1()
tensor_slice = P.Slice()
strided_slice = P.StridedSlice()
same_type_shape = P.SameTypeShape()
check_bprop = P.CheckBprop()
@@ -94,6 +95,22 @@ tensor_slice = P.Slice()
maximum = P.Maximum()
minimum = P.Minimum()
floor = P.Floor()
logical_not = P.LogicalNot()
logical_or = P.LogicalOr()
logical_and = P.LogicalAnd()
sin = P.Sin()
cos = P.Cos()
tan = P.Tan()
asin = P.Asin()
acos = P.ACos()
atan = P.Atan()
sinh = P.Sinh()
cosh = P.Cosh()
tanh = P.Tanh()
asinh = P.Asinh()
acosh = P.Acosh()
atanh = P.Atanh()
atan2 = P.Atan2()
scalar_to_array = P.ScalarToArray()
scalar_to_tensor = P.ScalarToTensor()

@@ -2560,7 +2560,7 @@ class Acosh(PrimitiveWithInfer):
TypeError: If `input_x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
``Ascend`` ``GPU``
Examples:
>>> acosh = ops.Acosh()
@@ -2637,7 +2637,7 @@ class Asinh(PrimitiveWithInfer):
TypeError: If `input_x` is not a Tensor.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
``Ascend`` ``GPU``
Examples:
>>> asinh = ops.Asinh()

@@ -20,7 +20,7 @@ import numpy as onp
import mindspore.numpy as mnp
from .utils import rand_int, rand_bool, match_array, match_res, match_meta, \
match_all_arrays
match_all_arrays, run_multi_test, to_tensor
class Cases():
@@ -40,8 +40,8 @@ class Cases():
self.array_sets = [1, 1.1, True, [1, 0, True], [1, 1.0, 2], (1,),
[(1, 2, 3), (4, 5, 6)], onp.random.random( # pylint: disable=no-member
(100, 100)).astype(onp.float32),
onp.random.random((100, 100)).astype(onp.bool)]
(100, 100)).astype(onp.float32).tolist(),
onp.random.random((100, 100)).astype(onp.bool).tolist()]
self.arrs = [
rand_int(2),
@@ -138,8 +138,8 @@ def test_asarray():
expected = mnp.asarray(array, test_case.mnp_dtypes[i]).asnumpy()
match_array(actual, expected, error=7)
# Additional tests for nested tensor/numpy_array mixture
mnp_input = [(onp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
# Additional tests for nested tensor mixture
mnp_input = [(mnp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
onp_input = [(onp.ones(3,), onp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
actual = onp.asarray(onp_input)
@@ -168,11 +168,11 @@ def test_array():
assert arr4 is arr5
# Additional tests for nested tensor/numpy_array mixture
mnp_input = [(onp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
mnp_input = [(mnp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
onp_input = [(onp.ones(3,), onp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
actual = onp.asarray(onp_input)
expected = mnp.asarray(mnp_input).asnumpy()
actual = onp.array(onp_input)
expected = mnp.array(mnp_input).asnumpy()
match_array(actual, expected, error=7)
@@ -202,11 +202,11 @@ def test_asfarray():
match_array(actual, expected, error=7)
# Additional tests for nested tensor/numpy_array mixture
mnp_input = [(onp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
mnp_input = [(mnp.ones(3,), mnp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
onp_input = [(onp.ones(3,), onp.ones(3)), [[1, 1, 1], (1, 1, 1)]]
actual = onp.asarray(onp_input)
expected = mnp.asarray(mnp_input).asnumpy()
actual = onp.asfarray(onp_input)
expected = mnp.asfarray(mnp_input).asnumpy()
match_array(actual, expected, error=7)
@@ -373,14 +373,14 @@ def test_linspace():
stop = onp.random.random([1, 5, 1]).astype("float32")
actual = onp.linspace(start, stop, num=20, retstep=True,
endpoint=False, dtype=onp.float32)
expected = mnp.linspace(mnp.asarray(start), mnp.asarray(stop), num=20,
expected = mnp.linspace(to_tensor(start), to_tensor(stop), num=20,
retstep=True, endpoint=False)
match_array(actual[0], expected[0].asnumpy(), error=6)
match_array(actual[1], expected[1].asnumpy(), error=6)
actual = onp.linspace(start, stop, num=20, retstep=True,
endpoint=False, dtype=onp.int16)
expected = mnp.linspace(mnp.asarray(start), mnp.asarray(stop), num=20,
expected = mnp.linspace(to_tensor(start), to_tensor(stop), num=20,
retstep=True, endpoint=False, dtype=mnp.int16)
match_array(actual[0], expected[0].asnumpy(), error=6)
match_array(actual[1], expected[1].asnumpy(), error=6)
@@ -388,7 +388,7 @@ def test_linspace():
for axis in range(2):
actual = onp.linspace(start, stop, num=20, retstep=False,
endpoint=False, dtype=onp.float32, axis=axis)
expected = mnp.linspace(mnp.asarray(start), mnp.asarray(stop), num=20,
expected = mnp.linspace(to_tensor(start), to_tensor(stop), num=20,
retstep=False, endpoint=False, dtype=mnp.float32, axis=axis)
match_array(actual, expected.asnumpy(), error=6)
@@ -510,18 +510,18 @@ def test_full_like():
for mnp_proto, onp_proto in zip(test_case.mnp_prototypes, test_case.onp_prototypes):
shape = onp.zeros_like(onp_proto).shape
fill_value = rand_int()
actual = mnp.full_like(mnp_proto, mnp.array(fill_value)).asnumpy()
actual = mnp.full_like(mnp_proto, to_tensor(fill_value)).asnumpy()
expected = onp.full_like(onp_proto, fill_value)
match_array(actual, expected)
for i in range(len(shape) - 1, 0, -1):
fill_value = rand_int(*shape[i:])
actual = mnp.full_like(mnp_proto, mnp.array(fill_value)).asnumpy()
actual = mnp.full_like(mnp_proto, to_tensor(fill_value)).asnumpy()
expected = onp.full_like(onp_proto, fill_value)
match_array(actual, expected)
fill_value = rand_int(1, *shape[i + 1:])
actual = mnp.full_like(mnp_proto, mnp.array(fill_value)).asnumpy()
actual = mnp.full_like(mnp_proto, to_tensor(fill_value)).asnumpy()
expected = onp.full_like(onp_proto, fill_value)
match_array(actual, expected)
@@ -549,6 +549,21 @@ def test_tri_triu_tril():
match_array(mnp.tri(64, 64, -10).asnumpy(), onp.tri(64, 64, -10))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nancumsum():
x = rand_int(2, 3, 4, 5)
x[0][2][1][3] = onp.nan
x[1][0][2][4] = onp.nan
x[1][1][1][1] = onp.nan
match_res(mnp.nancumsum, onp.nancumsum, x)
match_res(mnp.nancumsum, onp.nancumsum, x, axis=-2)
match_res(mnp.nancumsum, onp.nancumsum, x, axis=0)
match_res(mnp.nancumsum, onp.nancumsum, x, axis=3)
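
The test relies on nancumsum treating each NaN as zero so the running sum is not poisoned, unlike cumsum. In plain NumPy:

import numpy as onp

x = onp.array([1.0, onp.nan, 2.0])
print(onp.cumsum(x))     # [ 1. nan nan] -- NaN poisons everything after it
print(onp.nancumsum(x))  # [1. 1. 3.]    -- NaN counted as zero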
def mnp_diagonal(arr):
return mnp.diagonal(arr, offset=2, axis1=-1, axis2=0)
@@ -653,7 +668,7 @@ def test_meshgrid():
(2, 3), 9), onp.full((4, 5, 6), 7))
for i in range(len(xi)):
arrs = xi[i:]
mnp_arrs = map(mnp.asarray, arrs)
mnp_arrs = map(to_tensor, arrs)
for mnp_res, onp_res in zip(mnp_meshgrid(*mnp_arrs), onp_meshgrid(*arrs)):
match_all_arrays(mnp_res, onp_res)
@@ -750,6 +765,68 @@ def test_ix_():
match_res(mnp_ix_, onp_ix_, *test_arrs)
def mnp_indices():
a = mnp.indices((2, 3))
b = mnp.indices((2, 3, 4), sparse=True)
return a, b
def onp_indices():
a = onp.indices((2, 3))
b = onp.indices((2, 3, 4), sparse=True)
return a, b
def test_indices():
run_multi_test(mnp_indices, onp_indices, ())
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_geomspace():
start = onp.arange(1, 7).reshape(2, 3)
end = [1000, 2000, 3000]
match_array(mnp.geomspace(1, 256, num=9).asnumpy(),
onp.geomspace(1, 256, num=9), error=1)
match_array(mnp.geomspace(1, 256, num=8, endpoint=False).asnumpy(),
onp.geomspace(1, 256, num=8, endpoint=False), error=1)
match_array(mnp.geomspace(to_tensor(start), end, num=4).asnumpy(),
onp.geomspace(start, end, num=4), error=1)
match_array(mnp.geomspace(to_tensor(start), end, num=4, endpoint=False).asnumpy(),
onp.geomspace(start, end, num=4, endpoint=False), error=1)
match_array(mnp.geomspace(to_tensor(start), end, num=4, axis=-1).asnumpy(),
onp.geomspace(start, end, num=4, axis=-1), error=1)
match_array(mnp.geomspace(to_tensor(start), end, num=4, endpoint=False, axis=-1).asnumpy(),
onp.geomspace(start, end, num=4, endpoint=False, axis=-1), error=1)
start = onp.arange(1, 1 + 2*3*4*5).reshape(2, 3, 4, 5)
end = [1000, 2000, 3000, 4000, 5000]
for i in range(-5, 5):
match_array(mnp.geomspace(to_tensor(start), end, num=4, axis=i).asnumpy(),
onp.geomspace(start, end, num=4, axis=i), error=1)
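
geomspace returns samples evenly spaced on a log scale; for positive endpoints it is equivalent to exponentiating a linspace of the logarithms. A quick NumPy check of that equivalence (an illustration, not the diff's actual implementation):

import numpy as onp

start, stop, num = 1.0, 256.0, 9
manual = 10 ** onp.linspace(onp.log10(start), onp.log10(stop), num)
print(onp.allclose(manual, onp.geomspace(start, stop, num=num)))  # True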
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vander():
arrs = [rand_int(i + 3) for i in range(3)]
for i in range(3):
mnp_vander = mnp.vander(to_tensor(arrs[i]))
onp_vander = onp.vander(arrs[i])
match_all_arrays(mnp_vander, onp_vander)
mnp_vander = mnp.vander(to_tensor(arrs[i]), N=2, increasing=True)
onp_vander = onp.vander(arrs[i], N=2, increasing=True)
match_all_arrays(mnp_vander, onp_vander)
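
For reference, vander(x, N) stacks powers of x columnwise, decreasing from x**(N-1) by default and increasing with increasing=True; N defaults to len(x):

import numpy as onp

x = onp.array([1, 2, 3])
print(onp.vander(x))                        # [[1 1 1], [4 2 1], [9 3 1]]
print(onp.vander(x, N=2, increasing=True))  # [[1 1], [1 2], [1 3]]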
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training

File diff suppressed because it is too large

@@ -19,7 +19,8 @@ import numpy as onp
import mindspore.numpy as mnp
from .utils import rand_int, run_binop_test, match_res
from .utils import rand_int, rand_bool, run_binop_test, run_logical_test, match_res, \
match_all_arrays, to_tensor
class Cases():
@@ -55,6 +56,15 @@ class Cases():
rand_int(8, 1, 6, 1)
]
# Boolean arrays
self.boolean_arrs = [
rand_bool(),
rand_bool(5),
rand_bool(6, 1),
rand_bool(7, 1, 5),
rand_bool(8, 1, 6, 1)
]
# array which contains infs and nans
self.infs = onp.array([[1.0, onp.nan], [onp.inf, onp.NINF], [2.3, -4.5], [onp.nan, 0.0]])
@@ -246,10 +256,147 @@ def test_isneginf():
match_res(mnp_isneginf, onp_isneginf, test_case.infs)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_isscalar():
assert mnp.isscalar(1) == onp.isscalar(1)
assert mnp.isscalar(2.3) == onp.isscalar(2.3)
assert mnp.isscalar([4.5]) == onp.isscalar([4.5])
assert mnp.isscalar(False) == onp.isscalar(False)
assert mnp.isscalar(mnp.array(True)) == onp.isscalar(onp.array(True))
assert mnp.isscalar(to_tensor(True)) == onp.isscalar(onp.array(True))
assert mnp.isscalar('numpy') == onp.isscalar('numpy')
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_isclose():
a = [0, 1, 2, float('inf'), float('inf'), float('nan')]
b = [0, 1, -2, float('-inf'), float('inf'), float('nan')]
match_all_arrays(mnp.isclose(a, b), onp.isclose(a, b))
match_all_arrays(mnp.isclose(a, b, equal_nan=True), onp.isclose(a, b, equal_nan=True))
a = rand_int(2, 3, 4, 5)
diff = (onp.random.random((2, 3, 4, 5)).astype("float32") - 0.5) / 1000
b = a + diff
match_all_arrays(mnp.isclose(to_tensor(a), to_tensor(b), atol=1e-3), onp.isclose(a, b, atol=1e-3))
match_all_arrays(mnp.isclose(to_tensor(a), to_tensor(b), atol=1e-3, rtol=1e-4),
onp.isclose(a, b, atol=1e-3, rtol=1e-4))
match_all_arrays(mnp.isclose(to_tensor(a), to_tensor(b), atol=1e-2, rtol=1e-6),
onp.isclose(a, b, atol=1e-2, rtol=1e-6))
a = rand_int(2, 3, 4, 5)
b = rand_int(4, 5)
match_all_arrays(mnp.isclose(to_tensor(a), to_tensor(b)), onp.isclose(a, b))
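
isclose implements the elementwise test |a - b| <= atol + rtol * |b|, with NaNs unequal unless equal_nan=True, which is what the atol/rtol combinations above exercise. Spelled out in NumPy:

import numpy as onp

a = onp.array([1.0, onp.nan])
b = onp.array([1.0005, onp.nan])
print(onp.abs(a - b) <= 1e-3 + 1e-5 * onp.abs(b))               # manual rule: [ True False]
print(onp.isclose(a, b, atol=1e-3, rtol=1e-5))                  # [ True False]
print(onp.isclose(a, b, atol=1e-3, rtol=1e-5, equal_nan=True))  # [ True  True]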
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_in1d():
xi = [rand_int(), rand_int(1), rand_int(10)]
yi = [rand_int(), rand_int(1), rand_int(10)]
for x in xi:
for y in yi:
match_res(mnp.in1d, onp.in1d, x, y)
match_res(mnp.in1d, onp.in1d, x, y, invert=True)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_isin():
xi = [rand_int(), rand_int(1), rand_int(10), rand_int(2, 3)]
yi = [rand_int(), rand_int(1), rand_int(10), rand_int(2, 3)]
for x in xi:
for y in yi:
match_res(mnp.isin, onp.isin, x, y)
match_res(mnp.isin, onp.isin, x, y, invert=True)
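
in1d flattens its first argument and tests membership in the second, while isin keeps the first argument's shape (conceptually in1d(...).reshape(element.shape)). A small NumPy illustration:

import numpy as onp

element = onp.array([[0, 2], [4, 6]])
test_elements = [1, 2, 4, 8]
print(onp.in1d(element, test_elements))  # flat: [False  True  True False]
print(onp.isin(element, test_elements))  # same values, shape (2, 2)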
def mnp_logical_or(x1, x2):
return mnp.logical_or(x1, x2)
def onp_logical_or(x1, x2):
return onp.logical_or(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logical_or():
run_logical_test(mnp_logical_or, onp_logical_or, test_case)
def mnp_logical_xor(x1, x2):
return mnp.logical_xor(x1, x2)
def onp_logical_xor(x1, x2):
return onp.logical_xor(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logical_xor():
run_logical_test(mnp_logical_xor, onp_logical_xor, test_case)
def mnp_logical_and(x1, x2):
return mnp.logical_and(x1, x2)
def onp_logical_and(x1, x2):
return onp.logical_and(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logical_and():
run_logical_test(mnp_logical_and, onp_logical_and, test_case)
def mnp_logical_not(x):
return mnp.logical_not(x)
def onp_logical_not(x):
return onp.logical_not(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logical_not():
for arr in test_case.boolean_arrs:
expected = onp_logical_not(arr)
actual = mnp_logical_not(to_tensor(arr))
onp.testing.assert_equal(actual.asnumpy().tolist(), expected.tolist())

File diff suppressed because it is too large

@@ -15,6 +15,7 @@
"""utility functions for mindspore.numpy st tests"""
import functools
import numpy as onp
from mindspore import Tensor
import mindspore.numpy as mnp
@@ -90,7 +91,9 @@ def rand_bool(*shape):
def match_res(mnp_fn, onp_fn, *arrs, **kwargs):
"""Checks results from applying mnp_fn and onp_fn on arrs respectively"""
mnp_arrs = map(functools.partial(mnp.asarray, dtype='float32'), arrs)
dtype = kwargs.get('dtype', mnp.float32)
kwargs.pop('dtype', None)
mnp_arrs = map(functools.partial(Tensor, dtype=dtype), arrs)
error = kwargs.get('error', 0)
kwargs.pop('error', None)
mnp_res = mnp_fn(*mnp_arrs, **kwargs)
@@ -151,15 +154,32 @@ def run_unary_test(mnp_fn, onp_fn, test_case, error=0):
def run_multi_test(mnp_fn, onp_fn, arrs, error=0):
mnp_arrs = map(mnp.asarray, arrs)
mnp_arrs = map(Tensor, arrs)
for actual, expected in zip(mnp_fn(*mnp_arrs), onp_fn(*arrs)):
match_array(actual.asnumpy(), expected, error)
match_all_arrays(actual, expected, error)
def run_single_test(mnp_fn, onp_fn, arr, error=0):
mnp_arr = mnp.asarray(arr)
mnp_arr = Tensor(arr)
for actual, expected in zip(mnp_fn(mnp_arr), onp_fn(arr)):
if isinstance(expected, tuple):
for actual_arr, expected_arr in zip(actual, expected):
match_array(actual_arr.asnumpy(), expected_arr, error)
else:
match_array(actual.asnumpy(), expected, error)
def run_logical_test(mnp_fn, onp_fn, test_case):
for x1 in test_case.boolean_arrs:
for x2 in test_case.boolean_arrs:
match_res(mnp_fn, onp_fn, x1, x2, dtype=mnp.bool_)
def to_tensor(obj, dtype=None):
if dtype is None:
res = Tensor(obj)
if res.dtype == mnp.float64:
res = res.astype(mnp.float32)
if res.dtype == mnp.int64:
res = res.astype(mnp.int32)
else:
res = Tensor(obj, dtype)
return res
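
to_tensor narrows NumPy's default 64-bit dtypes to 32-bit because most MindSpore kernels compute in 32 bits, keeping the mnp and onp sides of each test comparable. A usage sketch, assuming a MindSpore install and the imports above:

t = to_tensor(onp.ones(3))               # onp defaults to float64 -> narrowed
print(t.dtype)                           # Float32
u = to_tensor([1, 2, 3])                 # Python ints land as int64 -> narrowed
print(u.dtype)                           # Int32
v = to_tensor(onp.ones(3), mnp.float64)  # an explicit dtype is respected
print(v.dtype)                           # Float64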
