pull/9800/head
lihongkang 4 years ago
parent 2ba79a32a2
commit def7fca51f

@@ -129,12 +129,10 @@ class Dropout(Cell):
>>> x = Tensor(np.ones([2, 2, 3]), mindspore.float32)
>>> net = nn.Dropout(keep_prob=0.8)
>>> net.set_train()
Dropout<keep_prob=0.8, dtype=Float32>
>>> output = net(x)
- >>> print(output)
- [[[0. 1.25 0. ]
-   [1.25 1.25 1.25]]
-  [[1.25 1.25 1.25]
-   [1.25 1.25 1.25]]]
+ >>> print(output.shape)
+ (2, 2, 3)
"""
def __init__(self, keep_prob=0.5, dtype=mstype.float32):
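The 1.25 values in the removed output come from inverted-dropout scaling: surviving elements are multiplied by 1 / keep_prob = 1 / 0.8 = 1.25, and only which positions are zeroed is random, which is why the example now prints the shape instead of the tensor. A minimal NumPy sketch of the same scaling rule (the mask here is illustrative, not MindSpore's RNG):

    import numpy as np

    keep_prob = 0.8
    x = np.ones([2, 2, 3], dtype=np.float32)
    mask = np.random.rand(*x.shape) < keep_prob  # random keep/drop mask
    print(x * mask / keep_prob)                  # surviving entries become 1.25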
@@ -257,12 +255,12 @@ class Dense(Cell):
``Ascend`` ``GPU`` ``CPU``
Examples:
- >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32)
+ >>> input = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
>>> net = nn.Dense(3, 4)
>>> output = net(input)
>>> print(output)
- [[ 2.5246444 2.2738023 0.5711005 -3.9399147 ]
-  [ 1.0739875 4.0155234 0.94188046 -5.459526 ]]
+ [[ 1.1199665 1.6730378 -1.383349 -1.5148697 ]
+  [ 3.0728707 0.0124917 -1.4012015 0.04354739 ]]
"""
@cell_attr_register(attrs=['has_bias', 'activation', 'in_channels', 'out_channels'])
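nn.Dense(3, 4) computes y = x @ W.T + b with a randomly initialized (4, 3) weight and length-4 bias, so the printed numbers still depend on initialization even with a pinned input. A NumPy sketch of the computation, with W and b standing in for the layer's random parameters:

    import numpy as np

    x = np.array([[180, 234, 154], [244, 48, 247]], dtype=np.float32)
    W = np.random.randn(4, 3).astype(np.float32)  # stand-in for the layer weight
    b = np.zeros(4, dtype=np.float32)             # stand-in for the layer bias
    print(x @ W.T + b)                            # shape (2, 4), as in the example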
@@ -456,10 +454,10 @@ class Norm(Cell):
Examples:
>>> net = nn.Norm(axis=0)
- >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
+ >>> input = Tensor(np.array([[4, 4, 9, 1], [2, 1, 3, 6]]), mindspore.float32)
>>> output = net(input)
>>> print(output)
- [7.81025 6.708204 0. 8.602325]
+ [4.472136 4.1231055 9.486833 6.0827627]
"""
def __init__(self, axis=(), keep_dims=False):
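The new expected values can be checked by hand: nn.Norm(axis=0) takes the L2 norm down each column, e.g. sqrt(4^2 + 2^2) = sqrt(20) ≈ 4.472136 for the first column. NumPy equivalent:

    import numpy as np

    x = np.array([[4, 4, 9, 1], [2, 1, 3, 6]], dtype=np.float32)
    print(np.linalg.norm(x, axis=0))  # [4.472136  4.1231055 9.486833  6.0827627]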
@@ -584,14 +582,14 @@ class Pad(Cell):
... self.pad = nn.Pad(paddings=((1, 1), (2, 2)), mode="CONSTANT")
... def construct(self, x):
... return self.pad(x)
- >>> x = np.random.random(size=(2, 3)).astype(np.float32)
+ >>> x = np.array([[0.3, 0.5, 0.2], [0.5, 0.7, 0.3]], dtype=np.float32)
>>> pad = Net()
>>> output = pad(Tensor(x))
>>> print(output)
- [[0. 0. 0. 0. 0. 0. ]
-  [0. 0. 0.82691735 0.36147234 0.70918983 0. ]
-  [0. 0. 0.7842975 0.44726616 0.4353459 0. ]
-  [0. 0. 0. 0. 0. 0. ]]
+ [[0. 0. 0. 0. 0. 0. 0. ]
+  [0. 0. 0.3 0.5 0.2 0. 0. ]
+  [0. 0. 0.5 0.7 0.3 0. 0. ]
+  [0. 0. 0. 0. 0. 0. 0. ]]
"""
def __init__(self, paddings, mode="CONSTANT"):
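The removed output had only six columns, which is inconsistent with paddings=((1, 1), (2, 2)): one zero row above and below plus two zero columns on each side turns the 2x3 input into a 4x7 array, as the corrected output shows. NumPy equivalent:

    import numpy as np

    x = np.array([[0.3, 0.5, 0.2], [0.5, 0.7, 0.3]], dtype=np.float32)
    print(np.pad(x, ((1, 1), (2, 2)), mode="constant"))  # same 4x7 zero-padded array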
@@ -694,8 +692,8 @@ class Unfold(Cell):
must be a tuple or list of int, and the format is [1, stride_row, stride_col, 1].
rates (Union[tuple[int], list[int]]): In each extracted patch, the gap between the corresponding dimension
pixel positions, must be a tuple or a list of integers, and the format is [1, rate_row, rate_col, 1].
- padding (str): The type of padding algorithm, is a string whose value is "same" or "valid",
- not case sensitive. Default: "valid".
+ padding (str): The type of padding algorithm, is a string whose value is "same" or "valid", not case sensitive.
+ Default: "valid".
- same: Means that the patch can take the part beyond the original image, and this part is filled with 0.

@@ -131,9 +131,9 @@ class Conv2d(_Conv):
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
- (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
+ (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
- (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
+ (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
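A quick numeric check of the "valid" output-size formula above, sketched in plain Python (the sample numbers are illustrative, not from the docstring): H_in = 32, ks_h = 3, stride = 1, dilation = 1, padding = 0 gives floor(1 + (32 - 3) / 1) = 30.

    from math import floor

    def valid_out(size, ks, stride=1, dilation=1, padding=0):
        # floor(1 + (size + 2*padding - ks - (ks - 1)*(dilation - 1)) / stride)
        return floor(1 + (size + 2 * padding - ks - (ks - 1) * (dilation - 1)) / stride)

    print(valid_out(32, 3))  # 30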
@@ -322,7 +322,7 @@ class Conv1d(_Conv):
If the 'pad_mode' is set to be "valid", the output width will be
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
- (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
+ (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

@@ -104,11 +104,13 @@ class LSTM(Cell):
``Ascend`` ``GPU``
Examples:
- >>> net = nn.LSTM(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False)
+ >>> net = nn.LSTM(10, 16, 2, has_bias=True, batch_first=True, bidirectional=False)
>>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32))
- >>> h0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))
- >>> c0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32))
+ >>> h0 = Tensor(np.ones([1 * 2, 3, 16]).astype(np.float32))
+ >>> c0 = Tensor(np.ones([1 * 2, 3, 16]).astype(np.float32))
>>> output, (hn, cn) = net(input, (h0, c0))
+ >>> print(output.shape)
+ (3, 5, 16)
"""
def __init__(self,
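The corrected numbers all follow from hidden_size = 16: h0 and c0 must be (num_layers * num_directions, batch_size, hidden_size) = (2, 3, 16), and with batch_first=True the output is (batch_size, seq_len, num_directions * hidden_size) = (3, 5, 16). Shape check in plain Python:

    num_layers, num_directions, batch, seq_len, hidden = 2, 1, 3, 5, 16
    print((num_layers * num_directions, batch, hidden))  # h0/c0 shape: (2, 3, 16)
    print((batch, seq_len, num_directions * hidden))     # output shape: (3, 5, 16)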

@@ -67,6 +67,9 @@ def repeat_elements(x, rep, axis=0):
One tensor with values repeated along the specified axis. If x has shape
(s1, s2, ..., sn) and axis is i, the output will have shape (s1, s2, ..., si * rep, ..., sn)
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
>>> output = C.repeat_elements(x, rep = 2, axis = 0)
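repeat_elements behaves like np.repeat along the chosen axis, so the example is easy to verify:

    import numpy as np

    x = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
    print(np.repeat(x, 2, axis=0))  # each row appears twice, giving a 4x3 result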

@@ -120,6 +120,9 @@ def clip_by_global_norm(x, clip_norm=1.0, use_norm=None):
Returns:
tuple[Tensor], a clipped Tensor.
+ Supported Platforms:
+     ``Ascend`` ``GPU``
Examples:
>>> x1 = np.array([[2., 3.],[1., 2.]]).astype(np.float32)
>>> x2 = np.array([[1., 4.],[3., 1.]]).astype(np.float32)
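Global-norm clipping scales every input by clip_norm / max(global_norm, clip_norm), where global_norm is the L2 norm over all elements of all inputs; for x1 and x2 above, global_norm = sqrt(18 + 27) = sqrt(45) ≈ 6.708. A NumPy sketch of that standard rule, assuming the default clip_norm=1.0:

    import numpy as np

    x1 = np.array([[2., 3.], [1., 2.]], dtype=np.float32)
    x2 = np.array([[1., 4.], [3., 1.]], dtype=np.float32)
    clip_norm = 1.0
    global_norm = np.sqrt(sum(np.sum(np.square(t)) for t in (x1, x2)))  # ~6.708
    scale = clip_norm / max(global_norm, clip_norm)
    clipped = [t * scale for t in (x1, x2)]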

@@ -53,6 +53,9 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
Returns:
Tensor, number of nonzero element. The data type is dtype.
+ Supported Platforms:
+     ``Ascend`` ``GPU``
Examples:
>>> input_x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
>>> nonzero_num = count_nonzero(x=input_x, axis=[0, 1], keep_dims=True, dtype=mstype.int32)
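The example input has three nonzero entries, so with axis=[0, 1] and keep_dims=True the result is a (1, 1) tensor holding 3. NumPy check:

    import numpy as np

    x = np.array([[0, 1, 0], [1, 1, 0]], dtype=np.float32)
    print(np.count_nonzero(x))  # 3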
@@ -181,6 +184,9 @@ def tensor_dot(x1, x2, axes):
Tensor, the shape of the output tensor is :math:`(N + M)`. Where :math:`N` and :math:`M` are the free axes not
contracted in both inputs
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input_x1 = Tensor(np.ones(shape=[1, 2, 3]), mindspore.float32)
>>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)
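tensor_dot contracts the given axes in the same way as np.tensordot. A sketch assuming axes=1, i.e. contracting the last axis of input_x1 with the first axis of input_x2 (the actual axes argument falls outside this hunk):

    import numpy as np

    x1 = np.ones([1, 2, 3], dtype=np.float32)
    x2 = np.ones([3, 1, 2], dtype=np.float32)
    out = np.tensordot(x1, x2, axes=1)
    print(out.shape)  # (1, 2, 1, 2); every entry is 3.0, a sum over three ones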

@@ -45,6 +45,9 @@ def normal(shape, mean, stddev, seed=None):
of `mean` and `stddev`.
The dtype is float32.
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> shape = (3, 1, 2)
>>> mean = Tensor(np.array([[3, 4], [5, 6]]), mstype.float32)
@@ -85,6 +88,9 @@ def laplace(shape, mean, lambda_param, seed=None):
Tensor. The shape should be the broadcasted shape of Input "shape" and shapes of mean and lambda_param.
The dtype is float32.
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> shape = (4, 16)
>>> mean = Tensor(1.0, mstype.float32)
@@ -127,6 +133,9 @@ def uniform(shape, minval, maxval, seed=None, dtype=mstype.float32):
of `minval` and `maxval`.
The dtype is designated as the input `dtype`.
+ Supported Platforms:
+     ``Ascend`` ``GPU``
Examples:
>>> # For discrete uniform distribution, only one number is allowed for both minval and maxval:
>>> shape = (4, 2)
@@ -174,6 +183,9 @@ def gamma(shape, alpha, beta, seed=None):
of `alpha` and `beta`.
The dtype is float32.
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> shape = (3, 1, 2)
>>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mstype.float32)
@@ -202,6 +214,9 @@ def poisson(shape, mean, seed=None):
Tensor. The shape should be equal to the broadcasted shape between the input "shape" and shapes of `mean`.
The dtype is float32.
+ Supported Platforms:
+     ``Ascend`` ``GPU`` ``CPU``
Examples:
>>> shape = (4, 1)
>>> mean = Tensor(np.array([5.0, 10.0]), mstype.float32)
@@ -236,6 +251,9 @@ def multinomial(inputs, num_sample, replacement=True, seed=None):
Tensor, has the same rows with input. The number of sampled indices of each row is `num_samples`.
The dtype is float32.
+ Supported Platforms:
+     ``GPU``
Examples:
>>> input = Tensor([0, 9, 4, 0], mstype.float32)
>>> output = C.multinomial(input, 2, True)
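The hunks in this file only add "Supported Platforms" headers; the sampling semantics are unchanged. For orientation, rough NumPy analogues of the functions touched here (illustrative only, not bitwise-compatible with MindSpore's seeded generators, and the gamma alpha/beta mapping is an assumption):

    import numpy as np

    rng = np.random.default_rng(5)
    rng.normal(loc=3.0, scale=1.0, size=(3, 1, 2))   # C.normal
    rng.laplace(loc=1.0, scale=1.0, size=(4, 16))    # C.laplace
    rng.uniform(low=0.0, high=1.0, size=(4, 2))      # C.uniform
    rng.gamma(shape=3.0, scale=1.0, size=(3, 1, 2))  # C.gamma
    rng.poisson(lam=5.0, size=(4, 1))                # C.poisson
    rng.choice(4, size=2, p=[0, 9/13, 4/13, 0])      # C.multinomial on [0, 9, 4, 0]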

@@ -749,14 +749,14 @@ class Unique(Primitive):
>>>
>>> # note that for GPU, this operator must be wrapped inside a model, and executed in graph mode.
>>> class UniqueNet(nn.Cell):
- >>>     def __init__(self):
- >>>         super(UniqueNet, self).__init__()
- >>>         self.unique_op = P.Unique()
- >>>
- >>>     def construct(self, x):
- >>>         output, indices = self.unique_op(x)
- >>>         return output, indices
- >>>
+ ...     def __init__(self):
+ ...         super(UniqueNet, self).__init__()
+ ...         self.unique_op = P.Unique()
+ ...
+ ...     def construct(self, x):
+ ...         output, indices = self.unique_op(x)
+ ...         return output, indices
+ ...
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
>>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
>>> net = UniqueNet()
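The fix here is a doctest convention: `>>>` marks only the first physical line of a statement, and every continuation line of a class or function body uses `...`, otherwise the doctest runner parses each line as a separate statement. A minimal illustration (Doubler is hypothetical):

    >>> class Doubler(nn.Cell):      # '>>>' only on the opening line
    ...     def construct(self, x):  # body lines continue with '...'
    ...         return x * 2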
@@ -1644,10 +1644,10 @@ class ArgMaxWithValue(PrimitiveWithInfer):
``Ascend`` ``GPU``
Examples:
- >>> input_x = Tensor(np.random.rand(5), mindspore.float32)
+ >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> index, output = ops.ArgMaxWithValue()(input_x)
>>> print(index, output)
- 2 0.87173676
+ 3 0.7
"""
@prim_attr_register
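With the pinned input the expectation is easy to verify: the maximum value 0.7 sits at index 3. NumPy check:

    import numpy as np

    x = np.array([0.0, 0.4, 0.6, 0.7, 0.1], dtype=np.float32)
    print(np.argmax(x), np.max(x))  # 3 0.7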
@@ -1701,10 +1701,10 @@ class ArgMinWithValue(PrimitiveWithInfer):
``Ascend``
Examples:
- >>> input_x = Tensor(np.random.rand(5), mindspore.float32)
+ >>> input_x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
>>> output = ops.ArgMinWithValue()(input_x)
>>> print(output)
- (Tensor(shape=[], dtype=Int32, value= 2), Tensor(shape=[], dtype=Float32, value= 0.0595638))
+ (Tensor(shape=[], dtype=Int32, value= 0), Tensor(shape=[], dtype=Float32, value= 0.0))
"""
@prim_attr_register
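The same check for the minimum: index 0 holds the smallest value 0.0, matching the corrected scalar tensors.

    import numpy as np

    x = np.array([0.0, 0.4, 0.6, 0.7, 0.1], dtype=np.float32)
    print(np.argmin(x), np.min(x))  # 0 0.0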
@@ -3629,7 +3629,7 @@ class SpaceToDepth(PrimitiveWithInfer):
>>> block_size = 2
>>> space_to_depth = ops.SpaceToDepth(block_size)
>>> output = space_to_depth(x)
- >>> print(output)
+ >>> print(output.shape)
(1, 12, 1, 1)
"""

@@ -5697,8 +5697,8 @@ class LARSUpdate(PrimitiveWithInfer):
... self.lars = ops.LARSUpdate()
... self.reduce = ops.ReduceSum()
... def construct(self, weight, gradient):
- ... w_square_sum = self.reduce(ops.square(weight))
- ... grad_square_sum = self.reduce(ops.square(gradient))
+ ... w_square_sum = self.reduce(ops.Square(weight))
+ ... grad_square_sum = self.reduce(ops.Square(gradient))
... grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)
... return grad_t
...
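One caveat on the replacement lines: ops.Square, like the ops.ReduceSum this example already instantiates in __init__, is a Primitive class, so the usual pattern is to create an instance once and call it in construct; ops.Square(weight) as written constructs the primitive rather than applying it. A sketch of that pattern, under the same example:

    # in __init__:
    self.square = ops.Square()  # instantiate the Primitive once
    # in construct:
    w_square_sum = self.reduce(self.square(weight))
    grad_square_sum = self.reduce(self.square(gradient))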
