pull/8311/head
lihongkang 4 years ago
parent dbcdda18ed
commit be6786e177

@@ -597,8 +597,7 @@ class GroupNorm(Cell):
[[[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]]
[0. 0. 0. 0.]],
[[0. 0. 0. 0.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]

@@ -168,9 +168,8 @@ class MaxPool1d(_PoolNd):
pad_mode (str): The optional value for pad mode, is "same" or "valid", not case sensitive.
Default: "valid".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible.
- same: Adopts the way of completion. The total number of padding will be calculated in horizontal
and vertical directions and evenly distributed to top and bottom, left and right if possible.
Otherwise, the last extra padding will be done from the bottom and the right side.
- valid: Adopts the way of discarding. The possible largest height and width of output
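For intuition, the two pad modes differ only in how the output length is derived from the input length L: "same" keeps ceil(L / stride) positions by padding as needed, while "valid" keeps floor((L - kernel_size) / stride) + 1 and discards the remainder. A minimal sketch (assuming mindspore.nn.MaxPool1d with a (N, C, L) input, as in this class):

    import numpy as np
    import mindspore
    import mindspore.nn as nn
    from mindspore import Tensor

    x = Tensor(np.arange(10).reshape(1, 2, 5), mindspore.float32)  # (N, C, L) = (1, 2, 5)

    # "same": output length = ceil(5 / 2) = 3, padding added if needed
    pool_same = nn.MaxPool1d(kernel_size=3, stride=2, pad_mode="same")
    # "valid": output length = floor((5 - 3) / 2) + 1 = 2, extra positions discarded
    pool_valid = nn.MaxPool1d(kernel_size=3, stride=2, pad_mode="valid")

    print(pool_same(x).shape)   # expected (1, 2, 3)
    print(pool_valid(x).shape)  # expected (1, 2, 2)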

@@ -199,7 +199,7 @@ class InverseDecayLR(LearningRateSchedule):
>>> learning_rate = 0.1
>>> decay_rate = 0.9
>>> decay_steps = 4
>>> global_step = Tenosr(2, mstype.int32)
>>> global_step = Tensor(2, mstype.int32)
>>> inverse_decay_lr = InverseDecayLR(learning_rate, decay_rate, decay_steps, True)
>>> inverse_decay_lr(global_step)
"""

@@ -35,6 +35,10 @@ fused_mul_add_n_op_info = TBERegOp("FusedMulAddN") \
.dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_C1HWNCoC0) \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ) \
.dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_Default, DataType.I32_5HD) \
.dtype_format(DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0, DataType.I32_Default, DataType.I32_C1HWNCoC0) \
.dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
.dtype_format(DataType.I32_FracZ, DataType.I32_FracZ, DataType.I32_Default, DataType.I32_FracZ) \
.get_op_info()

@@ -1082,6 +1082,7 @@ class TupleToArray(PrimitiveWithInfer):
Examples:
>>> type = P.TupleToArray()((1,2,3))
[1 2 3]
"""
@prim_attr_register
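The added output line shows that the tuple (1, 2, 3) becomes the 1-D tensor [1 2 3]. As a hedged sketch of typical use (the Int32 result dtype is an assumption based on the integer tuple elements):

    import mindspore.ops.operations as P

    x = P.TupleToArray()((1, 2, 3))   # constant tuple of scalars -> Tensor
    print(x)                          # [1 2 3]
    print(x.dtype)                    # presumably Int32 for integer elements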
@@ -1411,7 +1412,7 @@ class ArgMinWithValue(PrimitiveWithInfer):
Outputs:
tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
tensor.
- index (Tensor) - The index for the maximum value of the input tensor. If `keep_dims` is true, the shape of
- index (Tensor) - The index for the minimum value of the input tensor. If `keep_dims` is true, the shape of
output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
:math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`.
- output_x (Tensor) - The minimum value of input tensor, with the same shape as index.
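With the wording fixed (index of the minimum, not the maximum), a short sketch of the two outputs (assuming the axis parameter selects the dimension to reduce, as the shape formulas above imply):

    import numpy as np
    import mindspore
    import mindspore.ops.operations as P
    from mindspore import Tensor

    x = Tensor(np.array([[8, 2, 1], [5, 9, 3]]), mindspore.float32)
    index, output_x = P.ArgMinWithValue(axis=1)(x)   # reduce along the last axis
    print(index)      # expected [2 2]: positions of the row minima
    print(output_x)   # expected [1. 3.]: the minima themselves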
@@ -3980,8 +3981,8 @@ class Sort(PrimitiveWithInfer):
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
>>> sort = P.Sort()
>>> sort(x)
>>> ([[1.0, 2.0, 8.0], [3.0, 5.0, 9.0], [4.0, 6.0 ,7.0]],
[[2, 1, 0], [2, 0, 1], [0, 1, 2]])
([[1.0, 2.0, 8.0], [3.0, 5.0, 9.0], [4.0, 6.0 ,7.0]],
[[2, 1, 0], [2, 0, 1], [0, 1, 2]])
"""
@prim_attr_register
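The corrected example drops the `>>>` prompt in front of the output, since those lines are returned values rather than input. The values themselves follow the usual sort/argsort pairing along the last axis, which can be cross-checked with NumPy:

    import numpy as np

    x = np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]], np.float16)
    print(np.sort(x, axis=-1))     # [[1. 2. 8.] [3. 5. 9.] [4. 6. 7.]]
    print(np.argsort(x, axis=-1))  # [[2 1 0] [2 0 1] [0 1 2]]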

@@ -247,12 +247,13 @@ class ReduceScatter(PrimitiveWithInfer):
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>> import numpy as np
>>>
>>> init()
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM, group="nccl_world_group")
>>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM)
>>>
>>> def construct(self, x):
>>> return self.reducescatter(x)
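The Net above only produces output inside a launched multi-device job, so no printed result is shown. As a rough mental model (a NumPy emulation of the contract, not the distributed API): with ReduceOp.SUM the inputs from all devices are summed element-wise, the sum is split into equal slices along the first axis, and each rank keeps one slice, so the output's first dimension is the input's divided by the group size.

    import numpy as np

    def emulate_reduce_scatter_sum(per_rank_inputs):
        """Sum the tensors from every rank, then hand each rank one equal slice along axis 0."""
        reduced = np.sum(per_rank_inputs, axis=0)                # the "reduce" step
        return np.split(reduced, len(per_rank_inputs), axis=0)   # the "scatter" step

    inputs = [np.ones((8, 4), np.float32) * rank for rank in range(2)]  # pretend 2 ranks
    outputs = emulate_reduce_scatter_sum(inputs)
    print(outputs[0].shape)   # (4, 4): first dimension divided by the group size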

@@ -3304,6 +3304,7 @@ class Tan(PrimitiveWithInfer):
>>> tan = P.Tan()
>>> input_x = Tensor(np.array([-1.0, 0.0, 1.0]), mindspore.float32)
>>> output = tan(input_x)
[-1.5574081 0. 1.5574081]
"""
@prim_attr_register
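The added output line matches tan evaluated element-wise, since tan(±1) ≈ ±1.5574 and tan(0) = 0; a quick NumPy cross-check (values agree up to float32 rounding):

    import numpy as np
    print(np.tan(np.array([-1.0, 0.0, 1.0], np.float32)))   # [-1.5574077  0.  1.5574077]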

@@ -4554,8 +4554,8 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.var = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="accum")
>>> self.lr = 0.01
>>> self.l1 = 0.0
>>> self.l2 = 0.0
@@ -4564,9 +4564,11 @@ class SparseApplyProximalAdagrad(PrimitiveWithCheck):
self.l2, grad, indices)
>>> return out
>>> net = Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones((3,), np.int32))
>>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
>>> indices = Tensor(np.ones((1,), np.int32))
>>> output = net(grad, indices)
([[6.94971561e-01, 5.24479389e-01, 5.52502394e-01]],
[[1.69961065e-01, 9.21632349e-01, 7.83344746e-01]])
"""
__mindspore_signature__ = (
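Because var, accum and grad are drawn with np.random.rand, the printed numbers in the new output lines are only illustrative. For the update itself, here is a NumPy sketch of the per-row rule this operator is commonly described with (accumulate grad², rescale the learning rate, then apply the proximal step; with l1 = l2 = 0 it collapses to plain Adagrad). The exact kernel behaviour is an assumption based on the standard proximal-Adagrad formulation:

    import numpy as np

    lr, l1, l2 = 0.01, 0.0, 0.0
    var     = np.array([[0.5, 0.2, 0.7]], np.float32)
    accum   = np.array([[0.1, 0.1, 0.1]], np.float32)
    grad    = np.array([[0.3, 0.6, 0.9]], np.float32)
    indices = np.array([0], np.int32)            # rows of var/accum touched by grad

    for i, row in enumerate(indices):
        accum[row] += grad[i] * grad[i]
        lr_t = lr / np.sqrt(accum[row])
        prox = var[row] - lr_t * grad[i]
        var[row] = np.sign(prox) * np.maximum(np.abs(prox) - lr_t * l1, 0.0) / (1.0 + l2 * lr_t)

    print(var)
    print(accum)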
@@ -5267,18 +5269,21 @@ class SparseApplyFtrlV2(PrimitiveWithInfer):
>>> super(SparseApplyFtrlV2Net, self).__init__()
>>> self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,
l2_shrinkage=0.0, lr_power=-0.5)
>>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name="linear")
>>> self.var = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="var")
>>> self.accum = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="accum")
>>> self.linear = Parameter(Tensor(np.random.rand(1, 3).astype(np.float32)), name="linear")
>>>
>>> def construct(self, grad, indices):
>>> out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)
>>> return out
>>>
>>> net = SparseApplyFtrlV2Net()
>>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
>>> indices = Tensor(np.ones([3]), mindspore.int32)
>>> grad = Tensor(np.random.rand(1, 3).astype(np.float32))
>>> indices = Tensor(np.ones([1]), mindspore.int32)
>>> output = net(grad, indices)
([[3.98493223e-02, 4.38684933e-02, 8.25387388e-02]],
[[6.40987396e-01, 7.19417334e-01, 1.52606890e-01]],
[[7.43463933e-01, 2.92334408e-01, 6.81572020e-01]])
"""
__mindspore_signature__ = (
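As with the previous example, the printed tuple comes from random inputs and is only illustrative. The distinguishing parameter here is l2_shrinkage: assuming this operator mirrors the usual FTRL-with-shrinkage formulation, the shrinkage only alters the gradient that feeds the update, so with l2_shrinkage=0.0 (as configured above) the operator behaves like plain SparseApplyFtrl:

    import numpy as np

    l2_shrinkage = 0.0
    var  = np.array([[0.5, 0.2, 0.7]], np.float32)
    grad = np.array([[0.3, 0.6, 0.9]], np.float32)

    # assumed shrinkage rule: the gradient is augmented by 2 * l2_shrinkage * var
    grad_with_shrinkage = grad + 2.0 * l2_shrinkage * var
    print(np.allclose(grad_with_shrinkage, grad))   # True when l2_shrinkage is 0.0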

@@ -38,6 +38,8 @@ class StandardNormal(PrimitiveWithInfer):
>>> shape = (4, 16)
>>> stdnormal = P.StandardNormal(seed=2)
>>> output = stdnormal(shape)
>>> output.shape
(4, 16)
"""
@prim_attr_register
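The new lines show only output.shape because the sampled values depend on the seed and backend RNG. A minimal statistical sanity check (a sketch; the exact numbers will vary) is that a large sample should have mean close to 0 and standard deviation close to 1:

    import mindspore.ops.operations as P

    stdnormal = P.StandardNormal(seed=2)
    sample = stdnormal((10000,)).asnumpy()
    print(round(float(sample.mean()), 2), round(float(sample.std()), 2))   # roughly 0.0 and 1.0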
@@ -83,6 +85,8 @@ class StandardLaplace(PrimitiveWithInfer):
>>> shape = (4, 16)
>>> stdlaplace = P.StandardLaplace(seed=2)
>>> output = stdlaplace(shape)
>>> output.shape
(4, 16)
"""
@prim_attr_register
@@ -238,11 +242,13 @@ class UniformInt(PrimitiveWithInfer):
Tensor. The shape is the same as the input 'shape', and the data type is int32.
Examples:
>>> shape = (4, 16)
>>> shape = (2, 4)
>>> minval = Tensor(1, mstype.int32)
>>> maxval = Tensor(5, mstype.int32)
>>> uniform_int = P.UniformInt(seed=10)
>>> output = uniform_int(shape, minval, maxval)
[[4 2 1 3]
[4 3 4 5]]
"""
@prim_attr_register
@@ -287,9 +293,11 @@ class UniformReal(PrimitiveWithInfer):
Tensor. The shape that the input 'shape' denotes. The dtype is float32.
Examples:
>>> shape = (4, 16)
>>> shape = (2, 2)
>>> uniformreal = P.UniformReal(seed=2)
>>> output = uniformreal(shape)
[[0.4359949 0.18508208]
[0.02592623 0.93154085]]
"""
@prim_attr_register
