From 3c522351299f8de7a6bb37fab35c7b3782939310 Mon Sep 17 00:00:00 2001
From: Jiaqi
Date: Fri, 6 Nov 2020 10:43:19 +0800
Subject: [PATCH] modify example

---
 mindspore/nn/optim/adam.py              | 2 +-
 mindspore/nn/optim/ftrl.py              | 2 +-
 mindspore/nn/optim/lazyadam.py          | 2 +-
 mindspore/nn/optim/proximal_ada_grad.py | 2 +-
 mindspore/nn/wrap/loss_scale.py         | 8 ++++----
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index 017eaf1c4e..27ea0c56b2 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -337,7 +337,7 @@ class Adam(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
 
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
diff --git a/mindspore/nn/optim/ftrl.py b/mindspore/nn/optim/ftrl.py
index 0dd4228335..83c7af6004 100644
--- a/mindspore/nn/optim/ftrl.py
+++ b/mindspore/nn/optim/ftrl.py
@@ -190,7 +190,7 @@ class FTRL(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
 
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
diff --git a/mindspore/nn/optim/lazyadam.py b/mindspore/nn/optim/lazyadam.py
index 827843fede..9da6095974 100644
--- a/mindspore/nn/optim/lazyadam.py
+++ b/mindspore/nn/optim/lazyadam.py
@@ -255,7 +255,7 @@ class LazyAdam(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
 
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
diff --git a/mindspore/nn/optim/proximal_ada_grad.py b/mindspore/nn/optim/proximal_ada_grad.py
index d58f230270..479043c34e 100644
--- a/mindspore/nn/optim/proximal_ada_grad.py
+++ b/mindspore/nn/optim/proximal_ada_grad.py
@@ -159,7 +159,7 @@ class ProximalAdagrad(Optimizer):
         """If the input value is set to "CPU", the parameters will be updated on the host using the Fused
            optimizer operation."""
         if not isinstance(value, str):
-            raise ValueError("The value must be str type, but got value type is {}".format(type(value)))
+            raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
 
         if value not in ('CPU', 'Ascend'):
             raise ValueError("The value must be 'CPU' or 'Ascend', but got value {}".format(value))
diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py
index 490ccb3f90..5d9438534a 100644
--- a/mindspore/nn/wrap/loss_scale.py
+++ b/mindspore/nn/wrap/loss_scale.py
@@ -79,13 +79,13 @@ class DynamicLossScaleUpdateCell(Cell):
         >>> net_with_loss = Net()
         >>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
-        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
+        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> train_network.set_train()
         >>>
         >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32))
         >>> label = Tensor(np.zeros([16, 16]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32)
-        >>> output = train_network(inputs, label, scaling_sens)
+        >>> output = train_network(inputs, label, scale_sense=scaling_sens)
     """
 
     def __init__(self,
@@ -145,13 +145,13 @@ class FixedLossScaleUpdateCell(Cell):
         >>> net_with_loss = Net()
         >>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12)
-        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager)
+        >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> train_network.set_train()
         >>>
         >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32))
         >>> label = Tensor(np.zeros([16, 16]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32)
-        >>> output = train_network(inputs, label, scaling_sens)
+        >>> output = train_network(inputs, label, scale_sense=scaling_sens)
     """
 
     def __init__(self, loss_scale_value):
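
Not part of the patch: a minimal, self-contained sketch of the updated `scale_sense` usage that the docstring examples above assume. The toy `Net` (one dense layer plus MSE loss) is a hypothetical stand-in for whatever network-with-loss the caller builds; with a `DynamicLossScaleUpdateCell` passed as `scale_sense`, the train cell manages the loss scale itself, so the step is invoked with only the data and label.

    # Sketch only; `Net` below is an illustrative placeholder, not MindSpore code.
    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor

    class Net(nn.Cell):
        """Toy network-with-loss: one dense layer followed by MSE."""
        def __init__(self):
            super(Net, self).__init__()
            self.dense = nn.Dense(16, 16)
            self.loss = nn.MSELoss()

        def construct(self, x, label):
            return self.loss(self.dense(x), label)

    net_with_loss = Net()
    optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9)
    manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
    # The keyword is now `scale_sense`, replacing the old `scale_update_cell`.
    train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
    train_network.set_train()

    inputs = Tensor(np.ones([16, 16]).astype(np.float32))
    label = Tensor(np.zeros([16, 16]).astype(np.float32))
    output = train_network(inputs, label)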