pull/8393/head
lihongkang 4 years ago
parent 4e07f43dff
commit 6731c8d1f2

@@ -1344,7 +1344,8 @@ class Argmin(PrimitiveWithInfer):
     Examples:
         >>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
         >>> index = P.Argmin()(input_x)
-        >>> assert index == Tensor(2, mindspore.int64)
+        >>> print(index)
+        2
     """
     @prim_attr_register

@@ -363,6 +363,7 @@ class Broadcast(PrimitiveWithInfer):
         >>> from mindspore.communication import init
         >>> import mindspore.nn as nn
         >>> import mindspore.ops.operations as P
+        >>> import numpy as np
         >>>
         >>> init()
         >>> class Net(nn.Cell):

@@ -358,12 +358,12 @@ class Assert(PrimitiveWithInfer):
         >>> class AssertDemo(nn.Cell):
         >>>     def __init__(self):
         >>>         super(AssertDemo, self).__init__()
-        >>>         self.assert = P.Assert(summarize=10)
+        >>>         self.assert1 = P.Assert(summarize=10)
         >>>         self.add = P.TensorAdd()
         >>>
         >>>     def construct(self, x, y):
         >>>         data = self.add(x, y)
-        >>>         self.assert(True, [data])
+        >>>         self.assert1(True, [data])
         >>>         return data
     """

@@ -182,7 +182,9 @@ class AssignAdd(PrimitiveWithInfer):
         >>>
         >>> net = Net()
         >>> value = Tensor(np.ones([1]).astype(np.int64)*100)
-        >>> net(value)
+        >>> output = net(value)
+        >>> print(output)
+        Parameter (name=global_step)
     """
     __mindspore_signature__ = (
         sig.make_sig('x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
@@ -232,7 +234,9 @@ class AssignSub(PrimitiveWithInfer):
         >>>
         >>> net = Net()
         >>> value = Tensor(np.ones([1]).astype(np.int32)*100)
-        >>> net(value)
+        >>> output = net(value)
+        >>> print(output)
+        Parameter (name=global_step)
     """
     __mindspore_signature__ = (
@@ -3347,8 +3351,9 @@ class Atan(PrimitiveWithInfer):
         >>> tan = P.Tan()
         >>> output_y = tan(input_x)
         >>> atan = P.Atan()
-        >>> atan(output_y)
-        [[1.047, 07850001]]
+        >>> output = atan(output_y)
+        >>> print(output)
+        [[1.047, 0.7850001]]
     """
     @prim_attr_register
@@ -3473,7 +3478,7 @@ class BitwiseAnd(_BitwiseBinaryOp):
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

     Outputs:
-        - **y** (Tensor) - The same type as the `input_x1`.
+        Tensor, has the same type as the `input_x1`.

     Examples:
         >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
@@ -3499,7 +3504,7 @@ class BitwiseOr(_BitwiseBinaryOp):
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

     Outputs:
-        - **y** (Tensor) - The same type as the `input_x1`.
+        Tensor, has the same type as the `input_x1`.

     Examples:
         >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
@@ -3525,7 +3530,7 @@ class BitwiseXor(_BitwiseBinaryOp):
         - **input_x2** (Tensor) - The input tensor with same type as the `input_x1`.

     Outputs:
-        - **y** (Tensor) - The same type as the `input_x1`.
+        Tensor, has the same type as the `input_x1`.

     Examples:
         >>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
@@ -3541,10 +3546,11 @@ class BesselI0e(PrimitiveWithInfer):
     Computes BesselI0e of input element-wise.

     Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
+          float32.

     Outputs:
-        Tensor, has the same shape as `input_x`. Data type must be float16 or float32.
+        Tensor, has the same shape as `input_x`.

     Examples:
         >>> bessel_i0e = P.BesselI0e()
@@ -3570,10 +3576,11 @@ class BesselI1e(PrimitiveWithInfer):
     Computes BesselI1e of input element-wise.

     Inputs:
-        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
+        - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. Data type must be float16 or
+          float32.

     Outputs:
-        Tensor, has the same shape as `input_x`. Data type must be float16 or float32.
+        Tensor, has the same shape as `input_x`.

     Examples:
         >>> bessel_i1e = P.BesselI1e()

@@ -789,9 +789,12 @@ class BNTrainingReduce(PrimitiveWithInfer):
         - **square_sum** (Tensor) - A 1-D Tensor with float32 data type. Tensor of shape :math:`(C,)`.

     Examples:
-        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
+        >>> input_x = Tensor(np.ones([128, 3, 32, 3]), mindspore.float32)
         >>> bn_training_reduce = P.BNTrainingReduce()
         >>> output = bn_training_reduce(input_x)
+        >>> print(output)
+        ([1.22880000e+04, 1.22880000e+04, 1.22880000e+04],
+        [1.22880000e+04, 1.22880000e+04, 1.22880000e+04])
     """
     @prim_attr_register
@@ -843,15 +846,30 @@ class BNTrainingUpdate(PrimitiveWithInfer):
           Has the same shape as `variance`.

     Examples:
-        >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)
-        >>> sum = Tensor(np.ones([64]), mindspore.float32)
-        >>> square_sum = Tensor(np.ones([64]), mindspore.float32)
-        >>> scale = Tensor(np.ones([64]), mindspore.float32)
-        >>> offset = Tensor(np.ones([64]), mindspore.float32)
-        >>> mean = Tensor(np.ones([64]), mindspore.float32)
-        >>> variance = Tensor(np.ones([64]), mindspore.float32)
+        >>> input_x = Tensor(np.ones([1, 2, 2, 2]), mindspore.float32)
+        >>> sum = Tensor(np.ones([2]), mindspore.float32)
+        >>> square_sum = Tensor(np.ones([2]), mindspore.float32)
+        >>> scale = Tensor(np.ones([2]), mindspore.float32)
+        >>> offset = Tensor(np.ones([2]), mindspore.float32)
+        >>> mean = Tensor(np.ones([2]), mindspore.float32)
+        >>> variance = Tensor(np.ones([2]), mindspore.float32)
         >>> bn_training_update = P.BNTrainingUpdate()
         >>> output = bn_training_update(input_x, sum, square_sum, scale, offset, mean, variance)
+        >>> print(output)
+        ([[[[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]],
+        [[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]]]],
+        [[[[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]],
+        [[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]]]],
+        [[[[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]],
+        [[2.73200464e+00, 2.73200464e+00],
+        [2.73200464e+00, 2.73200464e+00]]]],
+        [2.50000000e-01, 2.50000000e-01],
+        [1.87500000e-01, 1.87500000e-01])
     """
     @prim_attr_register
@@ -928,13 +946,20 @@ class BatchNorm(PrimitiveWithInfer):
         - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.

     Examples:
-        >>> input_x = Tensor(np.ones([32, 64]), mindspore.float32)
-        >>> scale = Tensor(np.ones([64]), mindspore.float32)
-        >>> bias = Tensor(np.ones([64]), mindspore.float32)
-        >>> mean = Tensor(np.ones([64]), mindspore.float32)
-        >>> variance = Tensor(np.ones([64]), mindspore.float32)
+        >>> input_x = Tensor(np.ones([2, 2]), mindspore.float32)
+        >>> scale = Tensor(np.ones([2]), mindspore.float32)
+        >>> bias = Tensor(np.ones([2]), mindspore.float32)
+        >>> mean = Tensor(np.ones([2]), mindspore.float32)
+        >>> variance = Tensor(np.ones([2]), mindspore.float32)
         >>> batch_norm = P.BatchNorm()
         >>> output = batch_norm(input_x, scale, bias, mean, variance)
+        >>> print(output)
+        ([[1.0, 1.0],
+        [1.0, 1.0]],
+        [1.0, 1.0],
+        [1.0, 1.0],
+        [1.0, 1.0],
+        [1.0, 1.0])
     """
     @prim_attr_register
@@ -1704,7 +1729,10 @@ class BiasAdd(PrimitiveWithInfer):
         >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
         >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
        >>> bias_add = P.BiasAdd()
-        >>> bias_add(input_x, bias)
+        >>> output = bias_add(input_x, bias)
+        >>> print(output)
+        [[0.4662124 1.2493685 2.3611782]
+        [3.4662123 4.2493687 5.3611784]]
     """
     @prim_attr_register
@@ -4805,7 +4833,7 @@ class ApplyPowerSign(PrimitiveWithInfer):
         >>>         self.beta = 0.9
         >>>     def construct(self, grad):
         >>>         out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,
-                                                self.sign_decay, self.beta, grad)
+        >>>                                     self.sign_decay, self.beta, grad)
         >>>         return out
         >>> net = Net()
         >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))
@@ -5582,7 +5610,7 @@ class BasicLSTMCell(PrimitiveWithInfer):
             LSTM layer except the last layer. Default 1.0. The range of dropout is [0.0, 1.0].
         forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.
         state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is
-           a tensor and it needs to be split first. Default: True.
+            a tensor and it needs to be split first. Default: True.
         activation (str): Activation. Default: "tanh". Only "tanh" is currently supported.

     Inputs:
@@ -5614,12 +5642,20 @@ class BasicLSTMCell(PrimitiveWithInfer):
     Examples:
         >>> x = Tensor(np.random.rand(1, 32).astype(np.float16))
-        >>> h = Tensor(np.random.rand(1, 64).astype(np.float16))
-        >>> c = Tensor(np.random.rand(1, 64).astype(np.float16))
-        >>> w = Tensor(np.random.rand(96, 256).astype(np.float16))
-        >>> b = Tensor(np.random.rand(256, ).astype(np.float16))
+        >>> h = Tensor(np.random.rand(1, 2).astype(np.float16))
+        >>> c = Tensor(np.random.rand(1, 2).astype(np.float16))
+        >>> w = Tensor(np.random.rand(34, 8).astype(np.float16))
+        >>> b = Tensor(np.random.rand(8, ).astype(np.float16))
         >>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')
-        >>> lstm(x, h, c, w, b)
+        >>> output = lstm(x, h, c, w, b)
+        >>> print(output)
+        ([[9.5459e-01, 9.2725e-01]],
+        [[1.0000e+00, 1.0000e+00]],
+        [[1.0000e+00, 1.0000e+00]],
+        [[1.0000e+00, 1.0000e+00]],
+        [[9.9951e-01, 1.0000e+00]],
+        [[9.5459e-01, 9.2773e-01]],
+        [[0.0000e+00, 0.0000e+00]])
     """
     @prim_attr_register

@@ -48,7 +48,9 @@ class Assign(PrimitiveWithCheck):
         >>>         return self.y
         >>> x = Tensor([2.0], mindspore.float32)
         >>> net = Net()
-        >>> net(x)
+        >>> output = net(x)
+        >>> print(output)
+        Parameter (name=y)
     """
     __mindspore_signature__ = (
         sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),

@@ -134,11 +134,14 @@ class Gamma(PrimitiveWithInfer):
         The dtype is float32.

     Examples:
-        >>> shape = (4, 16)
+        >>> shape = (2, 2)
         >>> alpha = Tensor(1.0, mstype.float32)
         >>> beta = Tensor(1.0, mstype.float32)
         >>> gamma = P.Gamma(seed=3)
-        >>> output = Gamma(shape, alpha, beta)
+        >>> output = gamma(shape, alpha, beta)
+        >>> print(output)
+        [[0.21962446 0.33740655]
+        [1.0859369 0.25875127]]
     """
     @prim_attr_register
