pull/10124/head
lihongkang 4 years ago
parent 4ce11a930b
commit c27d27c46e

@@ -258,9 +258,8 @@ class Dense(Cell):
>>> input = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
>>> net = nn.Dense(3, 4)
>>> output = net(input)
>>> print(output)
[[ 1.1199665 1.6730378 -1.383349 -1.5148697 ]
[ 3.0728707 0.0124917 -1.4012015 0.04354739 ]]
>>> print(output.shape)
(2, 4)
"""
@cell_attr_register(attrs=['has_bias', 'activation', 'in_channels', 'out_channels'])
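
For quick orientation, a minimal standalone sketch of the same layer with an activation attached (the input values, the 'relu' name, and the shapes here are illustrative assumptions, not taken from the diff):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Dense with a built-in activation; 'relu' is resolved by name
# (assumed supported here, as for the other layers in this file).
net = nn.Dense(3, 4, activation='relu')
x = Tensor(np.ones((2, 3)), mindspore.float32)
print(net(x).shape)  # (2, 4), matching the corrected doctest above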

@@ -154,7 +154,6 @@ class DenseBnAct(Cell):
bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
same as input. The values of str refer to the function `initializer`. Default: 'zeros'.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
activation (Cell): The activation function applied to the output of the layer, e.g. 'ReLU'. Default: None.
has_bn (bool): Specifies to use batchnorm or not. Default: False.
momentum (float): Momentum for the moving average of batchnorm, must be in the range [0, 1]. Default: 0.9.
eps (float): Term added to the denominator to improve numerical stability for batchnorm, should be greater
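
To see these knobs together, a hedged usage sketch (the channel sizes and the 'relu' name are assumptions):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Dense -> BatchNorm -> activation in one block; has_bn=True enables the
# batchnorm branch governed by the momentum/eps parameters documented above.
net = nn.DenseBnAct(3, 4, has_bn=True, momentum=0.9, activation='relu')
x = Tensor(np.ones((2, 3)), mindspore.float32)
print(net(x).shape)  # (2, 4)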

@@ -441,6 +441,7 @@ class GlobalBatchNorm(_BatchNorm):
eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
momentum (float): A floating-point hyperparameter of the momentum for the
running_mean and running_var computation. Default: 0.9.
affine (bool): A bool value. When set to True, gamma and beta can be learned. Default: True.
gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform',
'he_uniform', etc. Default: 'ones'.
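
GlobalBatchNorm itself needs a multi-device setup, but the affine/gamma_init knobs documented above are shared with the single-device batchnorm layers; a minimal local sketch using nn.BatchNorm2d (feature count and input shape assumed):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# affine=True makes gamma and beta trainable; gamma_init='ones' seeds the
# scale weight exactly as described for GlobalBatchNorm above.
bn = nn.BatchNorm2d(num_features=3, eps=1e-5, momentum=0.9,
                    affine=True, gamma_init='ones')
x = Tensor(np.ones((1, 3, 4, 4)), mindspore.float32)
print(bn(x).shape)  # (1, 3, 4, 4)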

@@ -559,6 +559,9 @@ class Conv2dBnFoldQuant(Cell):
padding (int): Implicit paddings on both sides of the input. Default: 0.
eps (float): Parameter for the BatchNorm. Default: 1e-5.
momentum (float): Parameter for the BatchNorm op. Default: 0.997.
dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
group (int): Splits filter into groups, `in_channels` and `out_channels` must be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
convolution kernel. Default: 'normal'.
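
The group divisibility rule above is the usual grouped-convolution constraint; a sketch with plain nn.Conv2d (the channel counts are made up) that satisfies it:

import numpy as np
import mindspore
from mindspore import Tensor, nn

# group=4 splits the 8 input and 16 output channels into 4 groups;
# both 8 and 16 are divisible by 4, as the docstring requires.
conv = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, group=4)
x = Tensor(np.ones((1, 8, 32, 32)), mindspore.float32)
print(conv(x).shape)  # (1, 16, 32, 32) with the default 'same' padding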

@@ -244,7 +244,8 @@ class Lamb(Optimizer):
>>> optim = nn.Lamb(params=net.trainable_params(), learning_rate=0.1)
>>>
>>> #2) Use parameter groups and set different values
>>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR()
>>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR(learning_rate=0.1, end_learning_rate=0.01,
... decay_steps=4, power=0.5)
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
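
The hunk cuts off inside group_params; as a hedged sketch of the documented parameter-group pattern (not the verbatim diff continuation), the list typically closes like this, reusing net, conv_params, no_conv_params, and poly_decay_lr from the doctest above:

# Hypothetical continuation sketch: per-group weight decay for conv params,
# the PolynomialDecayLR schedule for the rest, and an explicit order.
group_params = [{'params': conv_params, 'weight_decay': 0.01},
                {'params': no_conv_params, 'lr': poly_decay_lr},
                {'order_params': net.trainable_params()}]
optim = nn.Lamb(group_params, learning_rate=0.1)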

@@ -3018,7 +3018,7 @@ class GetNext(PrimitiveWithInfer):
>>> relu = P.ReLU()
>>> result = relu(data).asnumpy()
>>> print(result.shape)
>>> (32, 1, 32, 32)
(32, 1, 32, 32)
"""
@prim_attr_register
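
The one-line fix in this hunk follows the doctest convention that expected output carries no '>>>' prompt; a tiny self-contained illustration:

import doctest

def demo():
    """
    >>> print((32, 1, 32, 32))
    (32, 1, 32, 32)
    """

if __name__ == "__main__":
    # Passes because the expected-output line is written without a prompt,
    # exactly the form the corrected docstring above now uses.
    doctest.testmod()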
