fix mindspore.numpy packaging issue and format API comments

pull/9961/head
yanglf1121 4 years ago
parent 280db3d651
commit 9072283395

@ -281,6 +281,7 @@ install(
${CMAKE_SOURCE_DIR}/mindspore/_extends
${CMAKE_SOURCE_DIR}/mindspore/parallel
${CMAKE_SOURCE_DIR}/mindspore/mindrecord
${CMAKE_SOURCE_DIR}/mindspore/numpy
${CMAKE_SOURCE_DIR}/mindspore/train
${CMAKE_SOURCE_DIR}/mindspore/common
${CMAKE_SOURCE_DIR}/mindspore/ops

@ -42,3 +42,5 @@ array_ops_module = ['array', 'asarray', 'asfarray', 'copy', 'ones', 'zeros', 'ar
math_module = ['mean', 'inner']
__all__ = array_ops_module + math_module + numeric_types
__all__.sort()

File diff suppressed because it is too large Load Diff

@ -14,7 +14,7 @@
# ============================================================================
"""Dtypes and utilities"""
from mindspore import (int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
from ..common.dtype import (int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
float16, float32, float64, bool_)
# original numpy has int->int64, float->float64, uint->uint64 mapping. we map

@ -13,8 +13,8 @@
# limitations under the License.
# ============================================================================
"""math operations, the function docs are adapted from Numpy API."""
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from ..ops import operations as P
from ..ops import functional as F
from .array_ops import squeeze
from .utils import _infer_out_shape, _is_scalar, _check_axis_valid, _get_device_compile, \
_check_shape_aligned
@ -22,7 +22,7 @@ from .utils import _infer_out_shape, _is_scalar, _check_axis_valid, _get_device_
def mean(a, axis=None, keepdims=False):
"""
Compute the arithmetic mean along the specified axis.
Computes the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken
over the flattened array by default, otherwise over the specified
@ -30,8 +30,8 @@ def mean(a, axis=None, keepdims=False):
Note:
Numpy arguments dtype and out are not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtypes are np.float16, and np.float32.
On GPU, the supported dtypes are mstype.float16, and mstype.float32.
On CPU, the supported dtypes are mstype.float16, and mstype.float32.
Args:
a (Tensor): input tensor containing numbers whose mean is desired.
@ -56,6 +56,7 @@ def mean(a, axis=None, keepdims=False):
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.arange(6, dtype='float32')
>>> output = np.mean(a, 0)
>>> print(output)
@ -83,8 +84,8 @@ def inner(a, b):
Note:
Numpy argument out is not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtype is np.float32.
On GPU, the supported dtypes are mstype.float16, and mstype.float32.
On CPU, the supported dtype is mstype.float32.
Args:
a (Tensor): input tensor. If a and b are nonscalar, their last
@ -103,6 +104,7 @@ def inner(a, b):
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.ones((5, 3))
>>> b = np.ones((2, 7, 3))
>>> output = np.inner(a, b)

@ -17,13 +17,12 @@ from functools import partial
import numpy as onp
import mindspore
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.primitive import constexpr
from mindspore.common import dtype as mstype
from ..common import Tensor
from ..ops import operations as P
from ..ops import functional as F
from ..ops.primitive import constexpr
from ..common import dtype as mstype
from .dtypes import dtype_tuple, all_types, dtype_map
@ -119,17 +118,17 @@ def _check_shape(shape):
def _check_dtype(dtype):
"""check the input dtype and make conversions"""
# convert the string dtype to mindspore.dtype
# convert the string dtype to mstype.dtype
if isinstance(dtype, str):
dtype = dtype.lower()
dtype = dtype_map[dtype]
elif isinstance(dtype, type):
if dtype is int:
dtype = mindspore.int32
dtype = mstype.int32
if dtype is float:
dtype = mindspore.float32
dtype = mstype.float32
if dtype is bool:
dtype = mindspore.bool_
dtype = mstype.bool_
if dtype not in dtype_tuple:
raise TypeError(
f"only {all_types} are allowed for dtype, but got {type(dtype)}")

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for array operations"""
"""unit tests for numpy array operations"""
import functools

@ -1,8 +1,23 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""
import pytest
import numpy as onp
import mindspore.context as context
import mindspore.numpy as mnp
@ -16,7 +31,6 @@ def rand_int(*shape):
class Cases():
def __init__(self):
self.device_cpu = context.get_context('device_target') == 'CPU'
self.arrs = [
rand_int(2),

Loading…
Cancel
Save