From eb4571a67f32db8329d7b5e8f52b7e1f30e42f15 Mon Sep 17 00:00:00 2001
From: jiangjinsheng
Date: Thu, 4 Jun 2020 15:51:54 +0800
Subject: [PATCH] fixed LeakyReLU, Optimizer

---
 mindspore/nn/layer/activation.py | 2 +-
 mindspore/nn/layer/embedding.py  | 4 ++--
 mindspore/nn/optim/optimizer.py  | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index 3a754e4c03..fe98ca296a 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -250,7 +250,7 @@ class LeakyReLU(Cell):
 
     def construct(self, x):
         alpha = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
-        if self.alpha <= 1:
+        if alpha <= 1:
             out = P.Maximum()(alpha * x, x)
         else:
             out = P.Minimum()(alpha * x, x)
diff --git a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py
index e27cd765af..8a6a0bd33d 100755
--- a/mindspore/nn/layer/embedding.py
+++ b/mindspore/nn/layer/embedding.py
@@ -45,8 +45,8 @@ class Embedding(Cell):
 
     Inputs:
         - **input** (Tensor) - Tensor of shape :math:`(\text{batch_size}, \text{input_length})`. The element of
-          the Tensor should be integer and not larger than vocab_size. else the corresponding embedding vector is zero
-          if larger than vocab_size.
+          the Tensor should be an integer no larger than vocab_size; otherwise, the corresponding
+          embedding vector is zero.
 
     Outputs:
         Tensor of shape :math:`(\text{batch_size}, \text{input_length}, \text{embedding_size})`.
diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py
index 658ffb7b46..82a276404d 100755
--- a/mindspore/nn/optim/optimizer.py
+++ b/mindspore/nn/optim/optimizer.py
@@ -93,13 +93,13 @@ class Optimizer(Cell):
 
         if isinstance(loss_scale, int):
             loss_scale = float(loss_scale)
-        validator.check_value_type("loss_scale", loss_scale, [float], None)
-        validator.check_number_range("loss_scale", loss_scale, 0.0, float("inf"), Rel.INC_NEITHER, None)
+        validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
+        validator.check_number_range("loss_scale", loss_scale, 0.0, float("inf"), Rel.INC_NEITHER, self.cls_name)
 
         if isinstance(weight_decay, int):
             weight_decay = float(weight_decay)
-        validator.check_value_type("weight_decay", weight_decay, [float], None)
-        validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, None)
+        validator.check_value_type("weight_decay", weight_decay, [float], self.cls_name)
+        validator.check_number_range("weight_decay", weight_decay, 0.0, float("inf"), Rel.INC_LEFT, self.cls_name)
 
         self.is_group = False
         self.is_group_lr = False
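
For reference, the LeakyReLU branch relies on a simple identity: when alpha <= 1, alpha*x <= x for x >= 0 and alpha*x >= x for x < 0, so an elementwise Maximum selects the correct piece; for alpha > 1 the inequalities flip and Minimum is needed instead. Below is a minimal standalone sketch in plain NumPy (an illustration of the identity, not the MindSpore code path):

import numpy as np

# Plain-NumPy sketch of the identity behind the patched branch; this is an
# illustration, not the MindSpore implementation.
def leaky_relu(x, alpha):
    # For alpha <= 1: alpha*x <= x when x >= 0 and alpha*x >= x when x < 0,
    # so the elementwise maximum picks x on the positive side and alpha*x
    # on the negative side. For alpha > 1 the inequalities flip.
    if alpha <= 1:
        return np.maximum(alpha * x, x)
    return np.minimum(alpha * x, x)

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(leaky_relu(x, 0.2))  # [-0.4 -0.1  0.   1.5]
print(leaky_relu(x, 2.0))  # [-4.  -1.   0.   1.5]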
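
The optimizer change threads self.cls_name into the validators so that a rejected argument is reported against the concrete optimizer subclass rather than None. A hypothetical stand-in below (not the real mindspore validator implementation) sketches why the extra argument matters:

# Hypothetical stand-in for a validator helper, written only to show the
# effect of passing a class name; the real MindSpore validator API is not
# reproduced here.
def check_value_type(arg_name, arg_value, valid_types, prim_name=None):
    if not isinstance(arg_value, tuple(valid_types)):
        prefix = f"For '{prim_name}', the" if prim_name else "The"
        raise TypeError(f"{prefix} '{arg_name}' must be one of "
                        f"{[t.__name__ for t in valid_types]}, but got "
                        f"{type(arg_value).__name__}.")
    return arg_value

# With prim_name=None:       TypeError: The 'loss_scale' must be one of ['float'], but got str.
# With prim_name='Momentum': TypeError: For 'Momentum', the 'loss_scale' must be one of ['float'], but got str.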