diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py
index 1258667709..bf1b7e6ac7 100644
--- a/mindspore/nn/layer/basic.py
+++ b/mindspore/nn/layer/basic.py
@@ -258,9 +258,8 @@ class Dense(Cell):
         >>> input = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)
         >>> net = nn.Dense(3, 4)
         >>> output = net(input)
-        >>> print(output)
-        [[ 1.1199665   1.6730378  -1.383349   -1.5148697 ]
-         [ 3.0728707   0.0124917  -1.4012015   0.04354739 ]]
+        >>> print(output.shape)
+        (2, 4)
     """
 
     @cell_attr_register(attrs=['has_bias', 'activation', 'in_channels', 'out_channels'])
diff --git a/mindspore/nn/layer/combined.py b/mindspore/nn/layer/combined.py
index fa8591897f..a8cece00ba 100644
--- a/mindspore/nn/layer/combined.py
+++ b/mindspore/nn/layer/combined.py
@@ -154,7 +154,6 @@ class DenseBnAct(Cell):
         bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype
             is same as input. The values of str refer to the function `initializer`. Default: 'zeros'.
         has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
-        activation (Cell): The regularization function applied to the output of the layer, eg. 'ReLU'. Default: None.
         has_bn (bool): Specifies to use batchnorm or not. Default: False.
         momentum (float): Momentum for moving average for batchnorm, must be [0, 1]. Default:0.9
         eps (float): Term added to the denominator to improve numerical stability for batchnorm, should be greater
diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py
index 4546ab336b..6cf489754a 100644
--- a/mindspore/nn/layer/normalization.py
+++ b/mindspore/nn/layer/normalization.py
@@ -441,6 +441,7 @@ class GlobalBatchNorm(_BatchNorm):
         eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
         momentum (float): A floating hyperparameter of the momentum for the
             running_mean and running_var computation. Default: 0.9.
+        affine (bool): A bool value. When set to True, gamma and beta can be learned. Default: True.
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight. The values
             of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', 'he_uniform',
             etc. Default: 'ones'.
diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py
index f16482712f..831b9c09c5 100644
--- a/mindspore/nn/layer/quant.py
+++ b/mindspore/nn/layer/quant.py
@@ -559,6 +559,9 @@ class Conv2dBnFoldQuant(Cell):
         padding (int): Implicit paddings on both sides of the input. Default: 0.
         eps (float): Parameters for BatchNormal. Default: 1e-5.
         momentum (float): Parameters for BatchNormal op. Default: 0.997.
+        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
+        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
+            divisible by the number of groups. Default: 1.
         has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
         weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
             Default: 'normal'.
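
Note: The Dense example above switches to printing `output.shape` because the layer's weights come from a random initializer, so the exact output values are not reproducible across runs. A minimal sketch of the shape-based check, assuming a standard MindSpore installation and reusing the input values from the docstring:

    import numpy as np
    import mindspore
    import mindspore.nn as nn
    from mindspore import Tensor

    # Same input as the docstring example.
    x = Tensor(np.array([[180, 234, 154], [244, 48, 247]]), mindspore.float32)

    # Dense(in_channels=3, out_channels=4): the weights are randomly
    # initialized, so only the output shape is deterministic.
    net = nn.Dense(3, 4)
    output = net(x)
    print(output.shape)  # (2, 4): 2 samples, 4 output channels
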
diff --git a/mindspore/nn/optim/lamb.py b/mindspore/nn/optim/lamb.py
index 2b6b5f9d5b..a0040128be 100755
--- a/mindspore/nn/optim/lamb.py
+++ b/mindspore/nn/optim/lamb.py
@@ -244,7 +244,8 @@ class Lamb(Optimizer):
         >>> optim = nn.Lamb(params=net.trainable_params(), learning_rate=0.1)
         >>>
         >>> #2) Use parameter groups and set different values
-        >>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR()
+        >>> poly_decay_lr = learning_rate_schedule.PolynomialDecayLR(learning_rate=0.1, end_learning_rate=0.01,
+        ...                                                          decay_steps=4, power=0.5)
         >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
         >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py
index 0ae818fdba..99c82f6311 100644
--- a/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/ops/operations/nn_ops.py
@@ -3018,7 +3018,7 @@ class GetNext(PrimitiveWithInfer):
         >>> relu = P.ReLU()
         >>> result = relu(data).asnumpy()
         >>> print(result.shape)
-        >>> (32, 1, 32, 32)
+        (32, 1, 32, 32)
     """
 
     @prim_attr_register
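
Note: The Lamb example above now passes the required arguments to PolynomialDecayLR. A self-contained sketch of that parameter-group setup; SimpleNet is a hypothetical stand-in for the `net` the docstring assumes, and the second parameter group follows the usual MindSpore group-parameter convention:

    import mindspore.nn as nn
    from mindspore.nn import learning_rate_schedule

    class SimpleNet(nn.Cell):
        """Hypothetical network with one conv layer and one dense layer."""
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, 3)
            self.flatten = nn.Flatten()
            self.dense = nn.Dense(8 * 32 * 32, 4)

        def construct(self, x):
            return self.dense(self.flatten(self.conv(x)))

    net = SimpleNet()

    # Decay the learning rate from 0.1 to 0.01 over 4 steps with power 0.5,
    # matching the values in the updated docstring example.
    poly_decay_lr = learning_rate_schedule.PolynomialDecayLR(
        learning_rate=0.1, end_learning_rate=0.01, decay_steps=4, power=0.5)

    # Parameter groups: conv weights get weight decay, the remaining
    # parameters use the dynamic learning rate schedule.
    conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
    no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
    group_params = [{'params': conv_params, 'weight_decay': 0.01},
                    {'params': no_conv_params, 'lr': poly_decay_lr}]

    optim = nn.Lamb(params=group_params, learning_rate=0.1)
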