Update examples of the erfc, flatten and other operators.

pull/8432/head
wangshuide2020 4 years ago
parent c5d9c78e46
commit 903c1750a4

@@ -172,7 +172,7 @@ class LazyAdam(Optimizer):
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
- If true, update the gradients without using NAG. Default: False.
+ If false, update the gradients without using NAG. Default: False.
weight_decay (float): Weight decay (L2 penalty). Default: 0.0.
loss_scale (float): A floating point value for the loss scale. Should be equal to or greater than 1. Default:
1.0.
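For reference, the behavior the corrected `use_nesterov` description refers to can be sketched in a few lines of NumPy. This is an illustrative sketch of the generic Adam-family step, not the operator's fused kernel; the function and variable names are assumptions:

import numpy as np

# Illustrative Adam-family step showing what use_nesterov toggles
# (a sketch, not MindSpore's fused implementation).
def adam_step(var, m, v, grad, lr, beta1, beta2, eps, use_nesterov):
    m = beta1 * m + (1 - beta1) * grad          # first-moment estimate
    v = beta2 * v + (1 - beta2) * grad * grad   # second-moment estimate
    if use_nesterov:
        # NAG variant: mix the current gradient back into the momentum term.
        update = (beta1 * m + (1 - beta1) * grad) / (np.sqrt(v) + eps)
    else:
        # Plain variant: use the momentum estimate directly.
        update = m / (np.sqrt(v) + eps)
    return var - lr * update, m, v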

@@ -1772,7 +1772,7 @@ class Erfc(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erfc = P.Erfc()
>>> erfc(input_x)
- [1.8427168, 0., 0.1572832, 0.00469124, 0.00002235]
+ [1.8427168, 1.0, 0.1572832, 0.00469124, 0.00002235]
"""
@prim_attr_register
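The corrected value is easy to sanity-check with Python's standard library, which implements the same complementary error function in double precision (the float32 kernel values in the docstring differ slightly in the trailing digits):

import math

# erfc(x) = 1 - erf(x); erf(0) = 0, so erfc(0) = 1.0 as in the new output.
print([round(math.erfc(x), 7) for x in [-1, 0, 1, 2, 3]])
# [1.8427008, 1.0, 0.1572992, 0.0046777, 2.21e-05]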
@@ -2895,6 +2895,8 @@ class FloatStatus(PrimitiveWithInfer):
>>> float_status = P.FloatStatus()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> result = float_status(input_x)
+ >>> print(result)
+ [1.]
"""
@prim_attr_register
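The new expected output follows from the operator's contract: the input contains log(-1) (NaN) and log(0) (-Inf). A NumPy sketch of that contract as the docstring describes it (an assumption for illustration, not the kernel):

import numpy as np

# FloatStatus-style check (illustrative): output is [1.] if the input
# contains any NaN or Inf, otherwise [0.].
def float_status(x):
    return np.array([1.0] if not np.all(np.isfinite(x)) else [0.0])

x = np.array([np.nan, 1.0, -np.inf], dtype=np.float32)  # like log(-1), 1, log(0)
print(float_status(x))  # [1.]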

@@ -90,7 +90,8 @@ class Flatten(PrimitiveWithInfer):
>>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
>>> flatten = P.Flatten()
>>> output = flatten(input_tensor)
- >>> assert output.shape == (1, 24)
+ >>> print(output.shape)
+ (1, 24)
"""
@prim_attr_register
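The printed shape can be reproduced with plain NumPy, since Flatten keeps the batch dimension and collapses the rest (an equivalent reshape for illustration, not the operator itself):

import numpy as np

# Equivalent of Flatten on a (1, 2, 3, 4) input: keep dim 0, merge the rest.
x = np.ones((1, 2, 3, 4), dtype=np.float32)
output = x.reshape(x.shape[0], -1)
print(output.shape)  # (1, 24)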
@@ -700,7 +701,7 @@ class FusedBatchNormEx(PrimitiveWithInfer):
Outputs:
Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.
- - **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.
+ - **output_x** (Tensor) - The output of FusedBatchNormEx, same type and shape as the `input_x`.
- **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.
- **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.
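To make the corrected `output_x` description concrete, here is a NumPy sketch of the normalization it denotes in training mode (output_x only; the moving-statistics updates and reserve outputs are omitted, and the function name is an assumption):

import numpy as np

# Batch-norm forward for NCHW input: per-channel statistics, then
# scale-and-shift. This illustrates output_x, not the fused kernel.
def batch_norm_forward(x, scale, bias, eps=1e-5):
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return scale.reshape(1, -1, 1, 1) * x_hat + bias.reshape(1, -1, 1, 1)

x = np.random.randn(2, 3, 4, 4).astype(np.float32)
y = batch_norm_forward(x, np.ones(3, np.float32), np.zeros(3, np.float32))
print(y.shape)  # (2, 3, 4, 4), same shape as the input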
@@ -3206,7 +3207,7 @@ class Adam(PrimitiveWithInfer):
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
- If true, update the gradients without using NAG. Default: False.
+ If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Tensor) - Weights to be updated.
@@ -3306,7 +3307,7 @@ class FusedSparseAdam(PrimitiveWithInfer):
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
- If true, update the gradients without using NAG. Default: False.
+ If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
@@ -3439,7 +3440,7 @@ class FusedSparseLazyAdam(PrimitiveWithInfer):
If false, the result is unpredictable. Default: False.
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
If true, update the gradients using NAG.
- If true, update the gradients without using NAG. Default: False.
+ If false, update the gradients without using NAG. Default: False.
Inputs:
- **var** (Parameter) - Parameters to be updated with float32 data type.
