From 1a54785fe28fc004307f9ab63e35a00549bea675 Mon Sep 17 00:00:00 2001 From: panyifeng Date: Tue, 25 Aug 2020 20:16:08 +0800 Subject: [PATCH] remove name arg from gradoperation --- mindspore/nn/wrap/cell_wrapper.py | 4 ++-- mindspore/nn/wrap/grad_reducer.py | 2 +- mindspore/nn/wrap/loss_scale.py | 2 +- mindspore/ops/composite/base.py | 7 +++---- mindspore/ops/operations/debug_ops.py | 4 ++-- .../cv/faster_rcnn/src/network_define.py | 3 +-- .../official/cv/maskrcnn/src/network_define.py | 3 +-- .../cv/resnet_thor/src/grad_reducer_thor.py | 2 +- model_zoo/official/cv/ssd/src/ssd.py | 2 +- .../cv/warpctc/src/warpctc_for_train.py | 2 +- .../official/cv/yolov3_darknet53/src/yolo.py | 2 +- .../cv/yolov3_darknet53_quant/src/yolo.py | 2 +- .../official/cv/yolov3_resnet18/src/yolov3.py | 2 +- model_zoo/official/gnn/gat/src/utils.py | 2 +- model_zoo/official/gnn/gcn/src/metrics.py | 2 +- .../official/nlp/bert/src/bert_for_finetune.py | 5 ++--- .../nlp/bert/src/bert_for_pre_training.py | 5 ++--- .../nlp/bert_thor/src/bert_for_pre_training.py | 5 ++--- .../nlp/bert_thor/src/grad_reducer_thor.py | 2 +- .../src/transformer/transformer_for_train.py | 2 +- .../nlp/tinybert/src/tinybert_for_gd_td.py | 12 ++++-------- .../transformer/src/transformer_for_train.py | 5 ++--- .../official/recommend/deepfm/src/deepfm.py | 2 +- .../wide_and_deep/src/wide_and_deep.py | 4 ++-- .../src/wide_and_deep.py | 6 ++---- .../function/compile_gradient_wrt_inputs.py | 2 +- .../function/compile_gradient_wrt_params.py | 2 +- ...ms_with_rand_and_run_gradient_wrt_inputs.py | 2 +- ...ms_with_rand_and_run_gradient_wrt_params.py | 2 +- .../function/run_gradient_wrt_inputs.py | 2 +- .../function/run_gradient_wrt_params.py | 2 +- .../utils/block_util.py | 2 +- .../utils/bprop_util.py | 2 +- .../utils/check_gradient.py | 6 +++--- tests/ops_common.py | 2 +- tests/perf_test/test_lenet.py | 2 +- tests/st/control/test_cont_grad.py | 4 ++-- tests/st/gnn/test_gnn_aggregator.py | 2 +- .../python_file_for_ci/wide_and_deep.py | 4 ++-- tests/st/model_zoo_tests/yolov3/src/yolov3.py | 2 +- .../models/bert/src/bert_for_pre_training.py | 5 ++--- tests/st/networks/models/bert/src/utils.py | 3 +-- .../resnet50/src_thor/grad_reducer_thor.py | 2 +- tests/st/networks/test_cell_bprop.py | 4 ++-- tests/st/ops/ascend/test_addn.py | 2 +- tests/st/ops/ascend/test_conv_grad.py | 2 +- tests/st/ops/ascend/test_dense_grad.py | 2 +- .../st/ops/ascend/test_fused_batchnorm_grad.py | 2 +- tests/st/ops/ascend/test_maxpool_grad.py | 2 +- .../ascend/test_maxpool_with_argmax_grad.py | 2 +- tests/st/ops/ascend/test_relu_grad.py | 2 +- tests/st/ops/ascend/test_simplemean_grad.py | 2 +- .../ascend/test_tbe_ops/test_batchnorm_grad.py | 2 +- .../ascend/test_tbe_ops/test_gelu_grad_sens.py | 4 ++-- .../ascend/test_tbe_ops/test_layernorm_grad.py | 2 +- .../ascend/test_tbe_ops/test_maximum_grad.py | 2 +- .../ascend/test_tbe_ops/test_maxpool_grad.py | 2 +- .../ascend/test_tbe_ops/test_minimum_grad.py | 2 +- .../ops/ascend/test_tbe_ops/test_relu_grad.py | 2 +- .../ascend/test_tbe_ops/test_relu_v2_grad.py | 2 +- .../test_resize_nearest_neighbor_grad.py | 2 +- ...t_sigmoid_cross_entropy_with_logits_grad.py | 2 +- .../ascend/test_tbe_ops/test_sigmoid_grad.py | 2 +- .../test_tbe_ops/test_smooth_l1_loss_grad.py | 2 +- .../test_tbe_ops/test_stridedslice_grad.py | 2 +- tests/st/ops/cpu/test_batchnorm_op.py | 2 +- tests/st/ops/cpu/test_lstm_op.py | 3 +-- tests/st/ops/custom_ops_tbe/test_square.py | 2 +- tests/st/ops/gpu/test_batchnorm_op.py | 2 +- 
.../st/ops/gpu/test_binary_cross_entropy_op.py | 2 +- tests/st/ops/gpu/test_ctcloss_op.py | 2 +- tests/st/ops/gpu/test_dense_op.py | 7 +++---- tests/st/ops/gpu/test_gelu_grad_op.py | 2 +- tests/st/ops/gpu/test_kl_div_op.py | 2 +- tests/st/ops/gpu/test_logsoftmax_op.py | 2 +- tests/st/ops/gpu/test_lstm_op.py | 3 +-- tests/st/ops/gpu/test_maximum_op.py | 2 +- tests/st/ops/gpu/test_minimum_op.py | 2 +- tests/st/ops/gpu/test_mirror_pad.py | 2 +- tests/st/ops/gpu/test_smoothl1loss_op.py | 2 +- tests/st/ops/gpu/test_softmax_op.py | 2 +- tests/st/ops/gpu/test_stridedslice_grad_op.py | 2 +- tests/st/ops/gpu/test_tanh_op.py | 2 +- tests/st/pynative/test_pynative_hook.py | 4 ++-- tests/st/pynative/test_pynative_lenet.py | 2 +- tests/st/pynative/test_pynative_mindarmour.py | 5 ++--- tests/st/pynative/test_pynative_resnet50.py | 2 +- tests/st/pynative/test_tensor_index.py | 2 +- tests/train_step_wrap.py | 6 +++--- tests/ut/python/dtype/test_list.py | 2 +- tests/ut/python/exec/test_train_with_lars.py | 3 +-- tests/ut/python/ir/test_row_tensor.py | 4 ++-- tests/ut/python/ir/test_sparse_tensor.py | 2 +- tests/ut/python/keep_order/test_keep_order.py | 2 +- tests/ut/python/model/test_mix_precision.py | 3 +-- tests/ut/python/nn/optim/test_lr_schedule.py | 2 +- tests/ut/python/nn/test_nn_pad.py | 2 +- tests/ut/python/ops/test_bprop_disorder.py | 2 +- tests/ut/python/ops/test_control_ops.py | 10 +++++----- tests/ut/python/ops/test_math_ops.py | 2 +- tests/ut/python/ops/test_momentum.py | 2 +- tests/ut/python/ops/test_nn_ops.py | 6 +++--- tests/ut/python/ops/test_ops.py | 2 +- .../parallel/test_add_relu_redistribution.py | 2 +- tests/ut/python/parallel/test_arithmetic.py | 2 +- tests/ut/python/parallel/test_attention.py | 2 +- .../parallel/test_auto_parallel_BN_PReLU.py | 2 +- .../parallel/test_auto_parallel_arithmetic.py | 2 +- ...st_auto_parallel_assign_sub_with_ref_key.py | 2 +- .../python/parallel/test_auto_parallel_cast.py | 2 +- .../test_auto_parallel_common_parameter.py | 2 +- .../test_auto_parallel_double_sources.py | 2 +- .../parallel/test_auto_parallel_double_star.py | 2 +- .../test_auto_parallel_double_subgraphs.py | 4 ++-- .../parallel/test_auto_parallel_fc_nobias.py | 2 +- .../parallel/test_auto_parallel_four_matmul.py | 2 +- .../parallel/test_auto_parallel_l2normalize.py | 2 +- .../parallel/test_auto_parallel_matmul_drop.py | 2 +- .../test_auto_parallel_matmul_prelu.py | 2 +- .../parallel/test_auto_parallel_onehot.py | 2 +- .../test_auto_parallel_partial_strategy.py | 2 +- .../test_auto_parallel_reduce_method.py | 2 +- .../parallel/test_auto_parallel_reshape.py | 2 +- .../parallel/test_auto_parallel_rhombus.py | 2 +- .../test_auto_parallel_softmax_loss.py | 2 +- .../parallel/test_auto_parallel_transformer.py | 2 +- .../parallel/test_auto_parallel_transpose.py | 2 +- .../test_auto_parallel_tuple_depend.py | 2 +- .../parallel/test_auto_parallel_two_matmul.py | 2 +- .../test_auto_parallel_two_partial_matmul.py | 2 +- .../parallel/test_auto_parallel_zig_zag.py | 2 +- .../parallel/test_auto_star_elimination.py | 2 +- .../ut/python/parallel/test_batch_parallel.py | 2 +- .../parallel/test_batch_parallel_dropout.py | 2 +- .../parallel/test_batch_parallel_tensoradd.py | 2 +- .../parallel/test_comparison_function_info.py | 2 +- .../python/parallel/test_dataset_interface.py | 2 +- .../parallel/test_different_type_for_div_op.py | 2 +- .../parallel/test_element_wise_function.py | 2 +- .../ut/python/parallel/test_embeddinglookup.py | 2 +- tests/ut/python/parallel/test_gather_v2.py | 2 +- 
.../parallel/test_gather_v2_primitive.py | 3 +-- tests/ut/python/parallel/test_get_next.py | 2 +- tests/ut/python/parallel/test_gpu_dropout.py | 2 +- .../test_hybird_parallel_activation.py | 2 +- tests/ut/python/parallel/test_l2normalize.py | 2 +- tests/ut/python/parallel/test_linear.py | 2 +- .../ut/python/parallel/test_loop_two_matmul.py | 2 +- tests/ut/python/parallel/test_loss_scale.py | 3 +-- .../ut/python/parallel/test_matmul_dropout.py | 2 +- tests/ut/python/parallel/test_matmul_tensor.py | 2 +- .../test_mix_precision_hybrid_parallel.py | 2 +- tests/ut/python/parallel/test_one_hot_net.py | 2 +- .../parallel/test_one_weight_parameter.py | 2 +- tests/ut/python/parallel/test_onehot.py | 2 +- tests/ut/python/parallel/test_prelu.py | 2 +- .../python/parallel/test_reduce_method_info.py | 2 +- tests/ut/python/parallel/test_reshape.py | 5 ++--- .../python/parallel/test_reshape_parameter.py | 2 +- tests/ut/python/parallel/test_scalar_loss.py | 2 +- .../parallel/test_semi_auto_two_subgraphs.py | 4 ++-- .../test_softmax_cross_entropy_loss.py | 2 +- .../parallel/test_sparse_feature_bprop.py | 2 +- .../python/parallel/test_sparse_gather_v2.py | 2 +- .../ut/python/parallel/test_split_grad_sens.py | 4 ++-- tests/ut/python/parallel/test_step_parallel.py | 2 +- .../parallel/test_strategy_checkpoint.py | 2 +- tests/ut/python/parallel/test_sum_as_loss.py | 2 +- tests/ut/python/parallel/test_two_matmul.py | 2 +- .../parallel/test_two_weights_parameter.py | 2 +- .../parallel/test_virtual_dataset_3_input.py | 2 +- .../python/parameter_feature/test_parameter.py | 4 ++-- .../python/parameter_feature/test_var_grad.py | 18 ++++++++---------- .../pipeline/infer/test_scalar_add_grad.py | 2 +- tests/ut/python/pipeline/parse/test_parse.py | 2 +- tests/ut/python/pynative_mode/ops/test_grad.py | 4 ++-- .../ut/python/pynative_mode/test_cont_cases.py | 4 ++-- .../ut/python/pynative_mode/test_framstruct.py | 8 ++++---- .../pynative_mode/test_high_order_grad.py | 6 +++--- tests/ut/python/pynative_mode/test_hook.py | 4 ++-- .../pynative_mode/test_implicit_conversion.py | 2 +- .../pynative_mode/test_insert_grad_of.py | 4 ++-- .../python/pynative_mode/test_kw_and_kwarg.py | 4 ++-- .../pynative_mode/test_pynative_model.py | 2 +- .../pynative_mode/test_sparse_pynative.py | 2 +- .../python/pynative_mode/test_stop_gradient.py | 4 ++-- .../test_user_define_bprop_check.py | 2 +- 187 files changed, 243 insertions(+), 269 deletions(-) diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index d0073a4929..c32b7eadd9 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -117,7 +117,7 @@ class WithGradCell(Cell): self.network = network self.loss_fn = loss_fn self.weights = ParameterTuple(network.trainable_params()) - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=(sens is not None)) + self.grad = C.GradOperation(get_by_list=True, sens_param=(sens is not None)) self.sens = sens if loss_fn is None: self.network_with_loss = network @@ -182,7 +182,7 @@ class TrainOneStepCell(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/mindspore/nn/wrap/grad_reducer.py b/mindspore/nn/wrap/grad_reducer.py index 68f676ec66..8e8a9ef756 100644 --- a/mindspore/nn/wrap/grad_reducer.py +++ 
b/mindspore/nn/wrap/grad_reducer.py @@ -269,7 +269,7 @@ class DistributedGradReducer(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = optimizer.parameters >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 3bfc7170f1..88a0f26a34 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -210,7 +210,7 @@ class TrainOneStepWithLossScaleCell(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.hyper_map = C.HyperMap() if context.get_context("device_target") == "GPU": self.gpu_target = True diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py index aa77f8005a..43d30128ba 100644 --- a/mindspore/ops/composite/base.py +++ b/mindspore/ops/composite/base.py @@ -106,12 +106,11 @@ class GradOperation(GradOperation_): a 'ones_like(outputs)' sensitivity will be attached automatically. Default: False. """ - def __init__(self, name, - get_all=False, get_by_list=False, sens_param=False): + def __init__(self, get_all=False, get_by_list=False, sens_param=False): self.get_all = get_all self.get_by_list = get_by_list self.sens_param = sens_param - GradOperation_.__init__(self, name, get_all, get_by_list, sens_param) + GradOperation_.__init__(self, 'grad', get_all, get_by_list, sens_param) self.grad_fn = None self.fn = None self.need_forward = False @@ -139,7 +138,7 @@ class GradOperation(GradOperation_): fn.already_run = False def __call__(self, fn, weights=None): - grad_ = GradOperation('grad', self.get_all, self.get_by_list, self.sens_param) + grad_ = GradOperation(self.get_all, self.get_by_list, self.sens_param) if self.grad_fn is None or self.fn != fn: if context.get_context("mode") == context.GRAPH_MODE: if self.get_by_list: diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index e5796ec360..4123122ea2 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -216,7 +216,7 @@ class InsertGradientOf(PrimitiveWithInfer): >>> return ret >>> >>> clip = P.InsertGradientOf(clip_gradient) - >>> grad_all = C.GradOperation('get_all', get_all=True) + >>> grad_all = C.GradOperation(get_all=True) >>> def InsertGradientOfClipDemo(): >>> def clip_test(x, y): >>> x = clip(x) @@ -268,7 +268,7 @@ class HookBackward(PrimitiveWithInfer): >>> def hook_fn(grad_out): >>> print(grad_out) >>> - >>> grad_all = GradOperation('get_all', get_all=True) + >>> grad_all = GradOperation(get_all=True) >>> hook = P.HookBackward(hook_fn) >>> >>> def hook_test(x, y): diff --git a/model_zoo/official/cv/faster_rcnn/src/network_define.py b/model_zoo/official/cv/faster_rcnn/src/network_define.py index 348c72cee5..ae71d46352 100644 --- a/model_zoo/official/cv/faster_rcnn/src/network_define.py +++ b/model_zoo/official/cv/faster_rcnn/src/network_define.py @@ -163,8 +163,7 @@ class TrainOneStepCell(nn.Cell): self.backbone = network_backbone self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = 
C.GradOperation(get_by_list=True, sens_param=True) self.sens = Tensor((np.ones((1,)) * sens).astype(np.float16)) self.reduce_flag = reduce_flag diff --git a/model_zoo/official/cv/maskrcnn/src/network_define.py b/model_zoo/official/cv/maskrcnn/src/network_define.py index 481632667b..b94262f45c 100644 --- a/model_zoo/official/cv/maskrcnn/src/network_define.py +++ b/model_zoo/official/cv/maskrcnn/src/network_define.py @@ -171,8 +171,7 @@ class TrainOneStepCell(nn.Cell): self.backbone = network_backbone self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = Tensor((np.ones((1,)) * sens).astype(np.float16)) self.reduce_flag = reduce_flag diff --git a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py index b8bbbf29b7..86ee3fcc8f 100644 --- a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py @@ -119,7 +119,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/model_zoo/official/cv/ssd/src/ssd.py b/model_zoo/official/cv/ssd/src/ssd.py index d2fb64531e..fca8a1948d 100644 --- a/model_zoo/official/cv/ssd/src/ssd.py +++ b/model_zoo/official/cv/ssd/src/ssd.py @@ -383,7 +383,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py index 5654f05e5d..8391ffe676 100755 --- a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py +++ b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py @@ -77,7 +77,7 @@ class TrainOneStepCellWithGradClip(Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py index 09cce0c97f..eefa1e8bfa 100644 --- a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py @@ -412,7 +412,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py index e010ddef2b..e794218636 100644 --- 
a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py @@ -412,7 +412,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py index 0ac6b21070..c33ed1a0d3 100644 --- a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py +++ b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py @@ -647,7 +647,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/model_zoo/official/gnn/gat/src/utils.py b/model_zoo/official/gnn/gat/src/utils.py index 06d3252994..8b8a46c76b 100644 --- a/model_zoo/official/gnn/gat/src/utils.py +++ b/model_zoo/official/gnn/gat/src/utils.py @@ -141,7 +141,7 @@ class TrainOneStepCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens def construct(self): diff --git a/model_zoo/official/gnn/gcn/src/metrics.py b/model_zoo/official/gnn/gcn/src/metrics.py index 0d47a9bc6c..8aa4c3da7e 100644 --- a/model_zoo/official/gnn/gcn/src/metrics.py +++ b/model_zoo/official/gnn/gcn/src/metrics.py @@ -150,7 +150,7 @@ class TrainOneStepCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens def construct(self): diff --git a/model_zoo/official/nlp/bert/src/bert_for_finetune.py b/model_zoo/official/nlp/bert/src/bert_for_finetune.py index 97262b6fae..1b147982f0 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_finetune.py +++ b/model_zoo/official/nlp/bert/src/bert_for_finetune.py @@ -57,8 +57,7 @@ class BertFinetuneCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -160,7 +159,7 @@ class BertSquadCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() self.parallel_mode = context.get_auto_parallel_context("parallel_mode") diff --git a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py index 8607c3ba87..84f442c22c 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py +++ 
b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py @@ -274,7 +274,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -353,8 +353,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py index 7ba00146db..807d5a5d31 100644 --- a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py @@ -293,7 +293,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -373,8 +373,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py index d0316e99b2..47b86a4e65 100644 --- a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py @@ -119,7 +119,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py index 7aa674d253..3cb1b3739a 100644 --- a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py +++ b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py @@ -239,7 +239,7 @@ class TransformerTrainOneStepWithLossScaleCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.all_reduce = P.AllReduce() diff --git a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py index f244c5591d..f003ec26e7 100644 --- a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py +++ b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py @@ 
-218,8 +218,7 @@ class BertTrainWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -310,8 +309,7 @@ class BertTrainCell(nn.Cell): self.weights = optimizer.parameters self.optimizer = optimizer self.sens = sens - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -474,8 +472,7 @@ class BertEvaluationWithLossScaleCell(nn.Cell): self.network = network self.weights = optimizer.parameters self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() @@ -562,8 +559,7 @@ class BertEvaluationCell(nn.Cell): self.weights = optimizer.parameters self.optimizer = optimizer self.sens = sens - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") diff --git a/model_zoo/official/nlp/transformer/src/transformer_for_train.py b/model_zoo/official/nlp/transformer/src/transformer_for_train.py index 164c9391e9..32d5ad7e20 100644 --- a/model_zoo/official/nlp/transformer/src/transformer_for_train.py +++ b/model_zoo/official/nlp/transformer/src/transformer_for_train.py @@ -158,7 +158,7 @@ class TransformerTrainOneStepCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -244,8 +244,7 @@ class TransformerTrainOneStepWithLossScaleCell(nn.Cell): self.network.add_flags(defer_inline=True) self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/model_zoo/official/recommend/deepfm/src/deepfm.py b/model_zoo/official/recommend/deepfm/src/deepfm.py index 61dd3b5f85..9b82b15525 100644 --- a/model_zoo/official/recommend/deepfm/src/deepfm.py +++ b/model_zoo/official/recommend/deepfm/src/deepfm.py @@ -286,7 +286,7 @@ class TrainStepWrap(nn.Cell): self.weights = ParameterTuple(network.trainable_params()) self.optimizer = Adam(self.weights, learning_rate=lr, eps=eps, loss_scale=loss_scale) self.hyper_map = C.HyperMap() - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = loss_scale def construct(self, batch_ids, batch_wts, label): diff --git a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py index 8bef1821b3..8b6566b32c 100644 --- a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py @@ -337,9 +337,9 @@ class 
TrainStepWrap(nn.Cell): self.optimizer_w = FTRL(learning_rate=5e-2, params=self.weights_w, l1=1e-8, l2=1e-8, initial_accum=1.0, loss_scale=sens) self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.loss_net_w = IthOutputCell(network, output_index=0) diff --git a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py index ba358dd723..c36a7ea66f 100644 --- a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py @@ -537,11 +537,9 @@ class TrainStepWrap(nn.Cell): self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', - get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', - get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py index 83c76c4cdb..66cd84a784 100644 --- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_inputs.py @@ -46,5 +46,5 @@ class CompileBackwardBlockWrtInputsBC(IBuilderComponent): """ def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py index 15cc02b3f4..bb6a9c68df 100644 --- a/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/compile_gradient_wrt_params.py @@ -46,5 +46,5 @@ class CompileBackwardBlockWrtParamsBC(IBuilderComponent): """ def __call__(self, verification_set): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, compile_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py index 2dcf807328..c7a0ca78c7 100644 --- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_inputs.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_unifo class RunBackwardBlockWrtInputsWithRandParamBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op, get_uniform_with_shape) diff --git 
a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py index 22f03194c4..4f046f60ad 100644 --- a/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/init_params_with_rand_and_run_gradient_wrt_params.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs, get_unifo class RunBackwardBlockWrtParamsWithRandParamBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op, get_uniform_with_shape) diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py index aa7ffad79c..124fe70c9b 100644 --- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py +++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_inputs.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs class RunBackwardBlockWrtInputsBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op) diff --git a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py index d365a00230..ae2086b2ff 100644 --- a/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py +++ b/tests/mindspore_test_framework/components/function/run_gradient_wrt_params.py @@ -22,5 +22,5 @@ from ...utils.block_util import run_block, gen_grad_net, create_funcs class RunBackwardBlockWrtParamsBC(IBuilderComponent): def __call__(self): - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) return create_funcs(self.verification_set, gen_grad_net, run_block, grad_op) diff --git a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index faf7143f4b..0ecfd066dc 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -331,7 +331,7 @@ def create_funcs(verification_set, block_generator, block_runner, grad_op=None, # gradient if grad_op: if num_outputs == 0: - grad_op_ = GradOperation('grad', get_all=grad_op.get_all, + grad_op_ = GradOperation(get_all=grad_op.get_all, get_by_list=grad_op.get_by_list, sens_param=False) b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const, const_first=const_first, add_fake_input=add_fake_input) diff --git a/tests/mindspore_test_framework/utils/bprop_util.py b/tests/mindspore_test_framework/utils/bprop_util.py index 1990c1d0df..11e9c0f90f 100644 --- a/tests/mindspore_test_framework/utils/bprop_util.py +++ b/tests/mindspore_test_framework/utils/bprop_util.py @@ -85,7 +85,7 @@ def bprop(func, *inputs, grads_wrt_outputs=None, wrt: list = None, params: list if not params: params = func.trainable_params() - grad_op = GradOperation(name='grad', get_all=wrt_inputs, 
get_by_list=wrt_params, sens_param=with_sens_param) + grad_op = GradOperation(get_all=wrt_inputs, get_by_list=wrt_params, sens_param=with_sens_param) grad = Bprop(func, wrt_params, params, grad_op, grads_wrt_outputs) if context.get_context("mode") == context.PYNATIVE_MODE: diff --git a/tests/mindspore_test_framework/utils/check_gradient.py b/tests/mindspore_test_framework/utils/check_gradient.py index 81490e7ee1..c11a7db5de 100644 --- a/tests/mindspore_test_framework/utils/check_gradient.py +++ b/tests/mindspore_test_framework/utils/check_gradient.py @@ -315,7 +315,7 @@ class ScalarGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) super(ScalarGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, sampling_times, reduce_output) @@ -358,7 +358,7 @@ class OperationGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_all=True, sens_param=True) + grad_op = GradOperation(get_all=True, sens_param=True) super(OperationGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, sampling_times, reduce_output) @@ -390,7 +390,7 @@ class NNGradChecker(_GradChecker): output_selector=None, sampling_times=-1, reduce_output=False) -> None: - grad_op = GradOperation('grad', get_by_list=True, sens_param=True) + grad_op = GradOperation(get_by_list=True, sens_param=True) self.params = ParameterTuple(fn.trainable_params()) super(NNGradChecker, self).__init__(fn, grad_op, args, delta, max_error, input_selector, \ output_selector, sampling_times, reduce_output) diff --git a/tests/ops_common.py b/tests/ops_common.py index fc41ea575d..7e042f57d4 100644 --- a/tests/ops_common.py +++ b/tests/ops_common.py @@ -23,7 +23,7 @@ from mindspore import Tensor from mindspore.common.api import _executor -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) class InputBackward(nn.Cell): diff --git a/tests/perf_test/test_lenet.py b/tests/perf_test/test_lenet.py index 41ff41acf4..8b61e9be5e 100644 --- a/tests/perf_test/test_lenet.py +++ b/tests/perf_test/test_lenet.py @@ -27,7 +27,7 @@ from mindspore.common.api import _executor context.set_context(mode=context.GRAPH_MODE) -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) batch_size = 1 channel = 1 diff --git a/tests/st/control/test_cont_grad.py b/tests/st/control/test_cont_grad.py index 4eb2257dcb..68f6b1f30d 100644 --- a/tests/st/control/test_cont_grad.py +++ b/tests/st/control/test_cont_grad.py @@ -28,8 +28,8 @@ from mindspore.ops import operations as P # context.set_context(save_graphs=True) -grad_by_list = C.GradOperation('get_by_list', get_by_list=True) -grad_all = C.GradOperation('get_all', get_all=True) +grad_by_list = C.GradOperation(get_by_list=True) +grad_all = C.GradOperation(get_all=True) def test_while_forward(): diff --git a/tests/st/gnn/test_gnn_aggregator.py b/tests/st/gnn/test_gnn_aggregator.py index a17187a2d4..fc1f682a78 100644 --- a/tests/st/gnn/test_gnn_aggregator.py +++ b/tests/st/gnn/test_gnn_aggregator.py @@ -25,7 +25,7 @@ from mindspore.common.api import _executor context.set_context(mode=context.GRAPH_MODE) 
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) class MeanAggregatorGrad(nn.Cell): diff --git a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py index e860e0afef..c31b3b5b0e 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py +++ b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py @@ -284,9 +284,9 @@ class TrainStepWrap(nn.Cell): self.optimizer_d = Adam( self.weights_d, learning_rate=3.5e-4, eps=1e-8, loss_scale=sens) self.hyper_map = C.HyperMap() - self.grad_w = C.GradOperation('grad_w', get_by_list=True, + self.grad_w = C.GradOperation(get_by_list=True, sens_param=True) - self.grad_d = C.GradOperation('grad_d', get_by_list=True, + self.grad_d = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.loss_net_w = IthOutputCell(network, output_index=0) diff --git a/tests/st/model_zoo_tests/yolov3/src/yolov3.py b/tests/st/model_zoo_tests/yolov3/src/yolov3.py index 0ac6b21070..c33ed1a0d3 100644 --- a/tests/st/model_zoo_tests/yolov3/src/yolov3.py +++ b/tests/st/model_zoo_tests/yolov3/src/yolov3.py @@ -647,7 +647,7 @@ class TrainingWrapper(nn.Cell): self.network = network self.weights = ms.ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.grad_reducer = None diff --git a/tests/st/networks/models/bert/src/bert_for_pre_training.py b/tests/st/networks/models/bert/src/bert_for_pre_training.py index 7c557a49c9..2577cf617a 100644 --- a/tests/st/networks/models/bert/src/bert_for_pre_training.py +++ b/tests/st/networks/models/bert/src/bert_for_pre_training.py @@ -271,7 +271,7 @@ class BertTrainOneStepCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.sens = sens self.reducer_flag = False self.parallel_mode = context.get_auto_parallel_context("parallel_mode") @@ -351,8 +351,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/tests/st/networks/models/bert/src/utils.py b/tests/st/networks/models/bert/src/utils.py index e4dd3e7b47..2b19d3d291 100644 --- a/tests/st/networks/models/bert/src/utils.py +++ b/tests/st/networks/models/bert/src/utils.py @@ -52,8 +52,7 @@ class BertFinetuneCell(nn.Cell): self.network = network self.weights = ParameterTuple(network.trainable_params()) self.optimizer = optimizer - self.grad = C.GradOperation('grad', - get_by_list=True, + self.grad = C.GradOperation(get_by_list=True, sens_param=True) self.reducer_flag = False self.allreduce = P.AllReduce() diff --git a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py index 0b160c02f2..e84c941249 100644 --- 
a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py +++ b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py @@ -120,7 +120,7 @@ class DistributedGradReducerThor(Cell): >>> self.network.add_flags(defer_inline=True) >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer - >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + >>> self.grad = C.GradOperation(get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None diff --git a/tests/st/networks/test_cell_bprop.py b/tests/st/networks/test_cell_bprop.py index 9fd699682e..92cda581a0 100644 --- a/tests/st/networks/test_cell_bprop.py +++ b/tests/st/networks/test_cell_bprop.py @@ -29,7 +29,7 @@ from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -grad_all = C.GradOperation('get_all', get_all=True) +grad_all = C.GradOperation(get_all=True) class MulAdd(nn.Cell): @@ -351,7 +351,7 @@ class MulAddWithParam(nn.Cell): @pytest.mark.platform_x86_ascend_training @pytest.mark.env_onecard def test_refkey_bprop(): - grad_by_list = C.GradOperation('get_by_list', get_all=True, get_by_list=True) + grad_by_list = C.GradOperation(get_all=True, get_by_list=True) class GradWrap(nn.Cell): def __init__(self, network): super(GradWrap, self).__init__() diff --git a/tests/st/ops/ascend/test_addn.py b/tests/st/ops/ascend/test_addn.py index fa97fcc973..7644dea397 100644 --- a/tests/st/ops/ascend/test_addn.py +++ b/tests/st/ops/ascend/test_addn.py @@ -49,7 +49,7 @@ def test_net(): def test_grad_addn_with_list(): - grad_op = C.GradOperation('get_all', get_all=True) + grad_op = C.GradOperation(get_all=True) class AddN(nn.Cell): def __init__(self): super().__init__() diff --git a/tests/st/ops/ascend/test_conv_grad.py b/tests/st/ops/ascend/test_conv_grad.py index e24f218087..85aed21cbc 100644 --- a/tests/st/ops/ascend/test_conv_grad.py +++ b/tests/st/ops/ascend/test_conv_grad.py @@ -29,7 +29,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_dense_grad.py b/tests/st/ops/ascend/test_dense_grad.py index 7a529144ee..6cd6516da1 100644 --- a/tests/st/ops/ascend/test_dense_grad.py +++ b/tests/st/ops/ascend/test_dense_grad.py @@ -26,7 +26,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_fused_batchnorm_grad.py b/tests/st/ops/ascend/test_fused_batchnorm_grad.py index a8d4190e09..7210b1f3c1 100644 --- a/tests/st/ops/ascend/test_fused_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_fused_batchnorm_grad.py @@ -30,7 +30,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_maxpool_grad.py b/tests/st/ops/ascend/test_maxpool_grad.py index 
9af4511120..2fb8fb6028 100644 --- a/tests/st/ops/ascend/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_maxpool_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py index 8d0d515580..a6c2933513 100644 --- a/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py +++ b/tests/st/ops/ascend/test_maxpool_with_argmax_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_relu_grad.py b/tests/st/ops/ascend/test_relu_grad.py index 4ebc17d507..e969bf6b88 100644 --- a/tests/st/ops/ascend/test_relu_grad.py +++ b/tests/st/ops/ascend/test_relu_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_simplemean_grad.py b/tests/st/ops/ascend/test_simplemean_grad.py index 2704c1434f..00605e1ef6 100644 --- a/tests/st/ops/ascend/test_simplemean_grad.py +++ b/tests/st/ops/ascend/test_simplemean_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py index 0340f9e6be..7c9110ba74 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_batchnorm_grad.py @@ -30,7 +30,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py index 0f890ea998..70bce4794d 100755 --- a/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_gelu_grad_sens.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad): @@ -71,7 +71,7 @@ class MEGeluLargeIn(Cell): class GradLargeIn(Cell): def __init__(self, network): super(GradLargeIn, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) 
+ self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, x1, x2, output_grad): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py index c068cbfe8a..dfcae1b895 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_layernorm_grad.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad,): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py index 529343812e..cd55676b0b 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maximum_grad.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore.ops import operations as P context.set_context(device_target="Ascend") -grad = C.GradOperation('get_all', get_all=True, sens_param=True) +grad = C.GradOperation(get_all=True, sens_param=True) class MaxNetMe(Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py index 7beb22f005..4d17b046ba 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_maxpool_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py index 3a19aaa1d1..7559ebe957 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_minimum_grad.py @@ -21,7 +21,7 @@ from mindspore.ops import composite as C from mindspore.ops.operations import Minimum context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") -grad = C.GradOperation('get_all', get_all=True, sens_param=True) +grad = C.GradOperation(get_all=True, sens_param=True) class MinNetMe(Cell): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py index 40dc5ebada..705e8bac7f 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_grad.py @@ -27,7 +27,7 @@ context.set_context(device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py index 645765792a..999a7af76e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_relu_v2_grad.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - 
self.grad = GradOperation(name="get_all", get_all=True) + self.grad = GradOperation(get_all=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py index 4603fc59d8..8d53fac146 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_resize_nearest_neighbor_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py index f3eaef5b86..d9203f8892 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_cross_entropy_with_logits_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py index 8e68ac3235..e6bcb97e7e 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_sigmoid_grad.py @@ -37,7 +37,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network @ms_function diff --git a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py index 7d30ae4bed..a69329a406 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_smooth_l1_loss_grad.py @@ -36,7 +36,7 @@ class Net(nn.Cell): class Grad(nn.Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, pred, gt, dout): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py index 1938aaeca3..d3a488a31d 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_stridedslice_grad.py @@ -26,7 +26,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Grad(Cell): def __init__(self, network): super(Grad, self).__init__() - self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.grad = GradOperation(get_all=True, sens_param=True) self.network = network def construct(self, input_, output_grad): diff --git a/tests/st/ops/cpu/test_batchnorm_op.py b/tests/st/ops/cpu/test_batchnorm_op.py index 0dc090d63e..e020354f8b 100644 --- a/tests/st/ops/cpu/test_batchnorm_op.py +++ b/tests/st/ops/cpu/test_batchnorm_op.py @@ -37,7 +37,7 @@ class Batchnorm_Net(Cell): class 
diff --git a/tests/st/ops/cpu/test_batchnorm_op.py b/tests/st/ops/cpu/test_batchnorm_op.py
index 0dc090d63e..e020354f8b 100644
--- a/tests/st/ops/cpu/test_batchnorm_op.py
+++ b/tests/st/ops/cpu/test_batchnorm_op.py
@@ -37,7 +37,7 @@ class Batchnorm_Net(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/cpu/test_lstm_op.py b/tests/st/ops/cpu/test_lstm_op.py
index c8174a5f90..3b159c83db 100644
--- a/tests/st/ops/cpu/test_lstm_op.py
+++ b/tests/st/ops/cpu/test_lstm_op.py
@@ -207,8 +207,7 @@ class Grad(nn.Cell):
         super(Grad, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     @ms_function
diff --git a/tests/st/ops/custom_ops_tbe/test_square.py b/tests/st/ops/custom_ops_tbe/test_square.py
index b8d847f4a7..5e9c4d8535 100644
--- a/tests/st/ops/custom_ops_tbe/test_square.py
+++ b/tests/st/ops/custom_ops_tbe/test_square.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

-grad_with_sens = C.GradOperation('grad_with_sens', sens_param=True)
+grad_with_sens = C.GradOperation(sens_param=True)

 class Net(nn.Cell):
diff --git a/tests/st/ops/gpu/test_batchnorm_op.py b/tests/st/ops/gpu/test_batchnorm_op.py
index 0aeac6dfcc..58aa841695 100644
--- a/tests/st/ops/gpu/test_batchnorm_op.py
+++ b/tests/st/ops/gpu/test_batchnorm_op.py
@@ -37,7 +37,7 @@ class Batchnorm_Net(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_binary_cross_entropy_op.py b/tests/st/ops/gpu/test_binary_cross_entropy_op.py
index 724188314d..1b770b78d9 100644
--- a/tests/st/ops/gpu/test_binary_cross_entropy_op.py
+++ b/tests/st/ops/gpu/test_binary_cross_entropy_op.py
@@ -54,7 +54,7 @@ def test_binary_cross_entropy_loss():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens, weight):
diff --git a/tests/st/ops/gpu/test_ctcloss_op.py b/tests/st/ops/gpu/test_ctcloss_op.py
index b9a88e7e70..964677740f 100644
--- a/tests/st/ops/gpu/test_ctcloss_op.py
+++ b/tests/st/ops/gpu/test_ctcloss_op.py
@@ -40,7 +40,7 @@ class Net(nn.Cell):
 class GradData(nn.Cell):
     def __init__(self, network):
         super(GradData, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=False)
+        self.grad = GradOperation(get_all=True, sens_param=False)
         self.network = network

     def construct(self, probs, labels, input_lengths, label_lengths):
diff --git a/tests/st/ops/gpu/test_dense_op.py b/tests/st/ops/gpu/test_dense_op.py
index e9c010ea77..b07baa658b 100644
--- a/tests/st/ops/gpu/test_dense_op.py
+++ b/tests/st/ops/gpu/test_dense_op.py
@@ -65,7 +65,7 @@ def test_biasadd():
 class GradData(nn.Cell):
     def __init__(self, network):
         super(GradData, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, inputs, output_grad):
@@ -77,8 +77,7 @@ class GradWeight(nn.Cell):
         super(GradWeight, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     def construct(self, x, output_grad):
@@ -169,7 +168,7 @@ def test_dw():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_, bias, dy):
diff --git a/tests/st/ops/gpu/test_gelu_grad_op.py b/tests/st/ops/gpu/test_gelu_grad_op.py
index 82145b9d3f..975355114e 100644
--- a/tests/st/ops/gpu/test_gelu_grad_op.py
+++ b/tests/st/ops/gpu/test_gelu_grad_op.py
@@ -37,7 +37,7 @@ class GeluNet(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_kl_div_op.py b/tests/st/ops/gpu/test_kl_div_op.py
index e5b8fcd079..64c9845f25 100644
--- a/tests/st/ops/gpu/test_kl_div_op.py
+++ b/tests/st/ops/gpu/test_kl_div_op.py
@@ -53,7 +53,7 @@ def test_binary_cross_entropy_loss():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_logsoftmax_op.py b/tests/st/ops/gpu/test_logsoftmax_op.py
index 5834f90a10..271be6367b 100644
--- a/tests/st/ops/gpu/test_logsoftmax_op.py
+++ b/tests/st/ops/gpu/test_logsoftmax_op.py
@@ -52,7 +52,7 @@ class LogSoftmax(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_lstm_op.py b/tests/st/ops/gpu/test_lstm_op.py
index f0a58c2d36..de1197eeb2 100644
--- a/tests/st/ops/gpu/test_lstm_op.py
+++ b/tests/st/ops/gpu/test_lstm_op.py
@@ -581,8 +581,7 @@ class Grad(nn.Cell):
         super(Grad, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     @ms_function
diff --git a/tests/st/ops/gpu/test_maximum_op.py b/tests/st/ops/gpu/test_maximum_op.py
index 9566554231..4e009dae43 100644
--- a/tests/st/ops/gpu/test_maximum_op.py
+++ b/tests/st/ops/gpu/test_maximum_op.py
@@ -35,7 +35,7 @@ class Net(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
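The Grad cells in these files all follow the sens_param=True protocol: the caller appends a sensitivity tensor (the seed gradient for the network output, called sens, dout or output_grad in the hunks) after the regular inputs. A standalone sketch of that convention, assuming PyNative mode and a plain function in place of a test cell:

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import composite as C

    context.set_context(mode=context.PYNATIVE_MODE)

    def square(x):
        return x * x

    # sens_param=True: the seed gradient is passed as the last argument
    # instead of defaulting to ones
    grad_with_sens = C.GradOperation(get_all=True, sens_param=True)

    x = Tensor(np.array([1.0, 2.0, 3.0]).astype(np.float32))
    sens = Tensor(np.array([1.0, 1.0, 1.0]).astype(np.float32))
    dx = grad_with_sens(square)(x, sens)[0]  # 2 * x * sens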
diff --git a/tests/st/ops/gpu/test_minimum_op.py b/tests/st/ops/gpu/test_minimum_op.py
index 2a14a5bb04..78198db45a 100644
--- a/tests/st/ops/gpu/test_minimum_op.py
+++ b/tests/st/ops/gpu/test_minimum_op.py
@@ -36,7 +36,7 @@ class MinimumNet(Cell):
 class Grad(Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_mirror_pad.py b/tests/st/ops/gpu/test_mirror_pad.py
index 9e6613d744..d28eaeecc8 100644
--- a/tests/st/ops/gpu/test_mirror_pad.py
+++ b/tests/st/ops/gpu/test_mirror_pad.py
@@ -58,7 +58,7 @@ def test_mirror_pad():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network
     def construct(self, input_, output_grad):
         return self.grad(self.network)(input_, output_grad)
diff --git a/tests/st/ops/gpu/test_smoothl1loss_op.py b/tests/st/ops/gpu/test_smoothl1loss_op.py
index 10d8411d20..4145f5e971 100644
--- a/tests/st/ops/gpu/test_smoothl1loss_op.py
+++ b/tests/st/ops/gpu/test_smoothl1loss_op.py
@@ -59,7 +59,7 @@ def test_smoothl1loss():
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, x1, x2, sens):
diff --git a/tests/st/ops/gpu/test_softmax_op.py b/tests/st/ops/gpu/test_softmax_op.py
index 73925d8c3b..0f654e10bd 100644
--- a/tests/st/ops/gpu/test_softmax_op.py
+++ b/tests/st/ops/gpu/test_softmax_op.py
@@ -79,7 +79,7 @@ class Net(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/ops/gpu/test_stridedslice_grad_op.py b/tests/st/ops/gpu/test_stridedslice_grad_op.py
index 17ad80d00a..77cb7e6009 100644
--- a/tests/st/ops/gpu/test_stridedslice_grad_op.py
+++ b/tests/st/ops/gpu/test_stridedslice_grad_op.py
@@ -36,7 +36,7 @@ class StridedSliceNet(nn.Cell):
 class GradData(nn.Cell):
     def __init__(self, network):
         super(GradData, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=False)
+        self.grad = C.GradOperation(get_all=True, sens_param=False)
         self.network = network

     def construct(self, x):
diff --git a/tests/st/ops/gpu/test_tanh_op.py b/tests/st/ops/gpu/test_tanh_op.py
index 065bf50f08..b44c59570c 100644
--- a/tests/st/ops/gpu/test_tanh_op.py
+++ b/tests/st/ops/gpu/test_tanh_op.py
@@ -37,7 +37,7 @@ class TanhNet(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, input_data, sens):
diff --git a/tests/st/pynative/test_pynative_hook.py b/tests/st/pynative/test_pynative_hook.py
index b9431f9d34..99688697ae 100644
--- a/tests/st/pynative/test_pynative_hook.py
+++ b/tests/st/pynative/test_pynative_hook.py
@@ -30,7 +30,7 @@ from mindspore.common.initializer import TruncatedNormal
 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 def weight_variable():
@@ -112,7 +112,7 @@ class GradWrap(nn.Cell):

     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)

 class test_custom_cell_base():
diff --git a/tests/st/pynative/test_pynative_lenet.py b/tests/st/pynative/test_pynative_lenet.py
index eb669cbb76..75b5d0cfe5 100644
--- a/tests/st/pynative/test_pynative_lenet.py
+++ b/tests/st/pynative/test_pynative_lenet.py
@@ -29,7 +29,7 @@ from mindspore.ops import operations as P
 np.random.seed(1)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 def weight_variable():
diff --git a/tests/st/pynative/test_pynative_mindarmour.py b/tests/st/pynative/test_pynative_mindarmour.py
index 469964c871..23e7b2d042 100644
--- a/tests/st/pynative/test_pynative_mindarmour.py
+++ b/tests/st/pynative/test_pynative_mindarmour.py
@@ -87,7 +87,7 @@ class LeNet(nn.Cell):
 class GradWithSens(Cell):
     def __init__(self, network):
         super(GradWithSens, self).__init__()
-        self.grad = GradOperation(name="grad", get_all=False,
+        self.grad = GradOperation(get_all=False,
                                   sens_param=True)
         self.network = network

@@ -99,8 +99,7 @@ class GradWrapWithLoss(Cell):
     def __init__(self, network):
         super(GradWrapWithLoss, self).__init__()
-        self._grad_all = GradOperation(name="get_all",
-                                       get_all=True,
+        self._grad_all = GradOperation(get_all=True,
                                        sens_param=False)
         self._network = network

diff --git a/tests/st/pynative/test_pynative_resnet50.py b/tests/st/pynative/test_pynative_resnet50.py
index 1a6df5db46..2f4c197239 100644
--- a/tests/st/pynative/test_pynative_resnet50.py
+++ b/tests/st/pynative/test_pynative_resnet50.py
@@ -40,7 +40,7 @@
 np.random.seed(1)
 ds.config.set_seed(1)

-grad_by_list = CP.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = CP.GradOperation(get_by_list=True)

 def weight_variable(shape):
diff --git a/tests/st/pynative/test_tensor_index.py b/tests/st/pynative/test_tensor_index.py
index 4f62204e7b..d1d496e034 100644
--- a/tests/st/pynative/test_tensor_index.py
+++ b/tests/st/pynative/test_tensor_index.py
@@ -24,7 +24,7 @@ from mindspore.common.parameter import ParameterTuple
 from mindspore.ops import composite as C

-grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True)
+grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)

 def setup_module():
diff --git a/tests/train_step_wrap.py b/tests/train_step_wrap.py
index 842b924198..0345167419 100644
--- a/tests/train_step_wrap.py
+++ b/tests/train_step_wrap.py
@@ -32,7 +32,7 @@ class TrainStepWrap(nn.Cell):
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = nn.Momentum(self.weights, 0.1, 0.9)
         self.hyper_map = C.HyperMap()
-        self.grad = C.GradOperation('grad', get_by_list=True)
+        self.grad = C.GradOperation(get_by_list=True)

     def construct(self, x, label):
         weights = self.weights
@@ -71,7 +71,7 @@ class TrainStepWrap2(nn.Cell):
         self.weights = ParameterTuple(network.get_parameters())
         self.optimizer = nn.Momentum(self.weights, 0.1, 0.9)
         self.hyper_map = C.HyperMap()
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens

     def construct(self, x):
@@ -93,7 +93,7 @@ class TrainStepWrapWithoutOpt(nn.Cell):
         super(TrainStepWrapWithoutOpt, self).__init__()
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad', get_by_list=True)
+        self.grad = C.GradOperation(get_by_list=True)

     def construct(self, x, label):
         grads = self.grad(self.network, self.weights)(x, label)
diff --git a/tests/ut/python/dtype/test_list.py b/tests/ut/python/dtype/test_list.py
index e8b651c55b..13460e03ba 100644
--- a/tests/ut/python/dtype/test_list.py
+++ b/tests/ut/python/dtype/test_list.py
@@ -31,7 +31,7 @@ from tests.mindspore_test_framework.pipeline.forward.compile_forward \
 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 def test_list_equal():
diff --git a/tests/ut/python/exec/test_train_with_lars.py b/tests/ut/python/exec/test_train_with_lars.py
index 4d3621b3b2..b09584f298 100644
--- a/tests/ut/python/exec/test_train_with_lars.py
+++ b/tests/ut/python/exec/test_train_with_lars.py
@@ -52,8 +52,7 @@ class TrainOneStepWithLarsCell(nn.Cell):
         self.slice_index, self.params_len, weights = get_net_trainable_reordered_params(self.network)
         self.weights = ParameterTuple(weights)
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = Parameter(Tensor([sens], mstype.float32), name='sens', requires_grad=False)
         self.weight_decay = 1.0
diff --git a/tests/ut/python/ir/test_row_tensor.py b/tests/ut/python/ir/test_row_tensor.py
index cdfcf55bdc..62d7d761a1 100644
--- a/tests/ut/python/ir/test_row_tensor.py
+++ b/tests/ut/python/ir/test_row_tensor.py
@@ -248,7 +248,7 @@ def test_row_tensor_attr():

 def test_row_tensor_sparse_gatherv2_grad_all():
-    grad_all = C.GradOperation('get_all', get_all=True)
+    grad_all = C.GradOperation(get_all=True)
     class GradWrap(nn.Cell):
         def __init__(self, network):
             super(GradWrap, self).__init__()
@@ -269,7 +269,7 @@ def test_row_tensor_sparse_gatherv2_grad_all():

 def test_row_tensor_sparse_gatherv2_grad_with_pram():
-    grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+    grad_by_list = C.GradOperation(get_by_list=True)
     class GradWrap(nn.Cell):
         def __init__(self, network):
             super(GradWrap, self).__init__()
diff --git a/tests/ut/python/ir/test_sparse_tensor.py b/tests/ut/python/ir/test_sparse_tensor.py
index 76f53f2e13..184bc26d93 100644
--- a/tests/ut/python/ir/test_sparse_tensor.py
+++ b/tests/ut/python/ir/test_sparse_tensor.py
@@ -28,7 +28,7 @@ from mindspore import Tensor, SparseTensor, context
 context.set_context(mode=context.GRAPH_MODE, enable_sparse=True)

-grad_op = C.GradOperation('get_all', get_all=True)
+grad_op = C.GradOperation(get_all=True)

 class MakeSparseTensor(nn.Cell):
     def __init__(self, dense_shape):
diff --git a/tests/ut/python/keep_order/test_keep_order.py b/tests/ut/python/keep_order/test_keep_order.py
index fa0df6dd5d..0113a36278 100644
--- a/tests/ut/python/keep_order/test_keep_order.py
+++ b/tests/ut/python/keep_order/test_keep_order.py
@@ -50,7 +50,7 @@ class Func(nn.Cell):
         return out

-grad_s = C.GradOperation('grad_with_sens', get_all=True, sens_param=True)
+grad_s = C.GradOperation(get_all=True, sens_param=True)

 class Net(nn.Cell):
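The training wrappers above use the other axis of the API: get_by_list=True differentiates with respect to a ParameterTuple of weights rather than the inputs. A minimal sketch mirroring TrainStepWrap and GetParamGrad (the ParamGrad name and the nn.Dense stand-in network are illustrative, not from this patch):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor, ParameterTuple, context
    from mindspore.ops import composite as C

    context.set_context(mode=context.GRAPH_MODE)

    class ParamGrad(nn.Cell):
        # returns one gradient per trainable parameter of `network`
        def __init__(self, network):
            super(ParamGrad, self).__init__()
            self.network = network
            self.weights = ParameterTuple(network.trainable_params())
            self.grad = C.GradOperation(get_by_list=True)

        def construct(self, x):
            return self.grad(self.network, self.weights)(x)

    net = nn.Dense(4, 2)
    x = Tensor(np.ones([3, 4]).astype(np.float32))
    grads = ParamGrad(net)(x)  # tuple aligned with net.trainable_params()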
diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py
index 89a71bd37c..4570bd243a 100644
--- a/tests/ut/python/model/test_mix_precision.py
+++ b/tests/ut/python/model/test_mix_precision.py
@@ -166,8 +166,7 @@ class GetParamGrad(nn.Cell):
         super(GetParamGrad, self).__init__(auto_prefix=False)
         self.network = network
         self.weights = ParameterTuple(network.trainable_params())
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)

     def construct(self, data, sens):
diff --git a/tests/ut/python/nn/optim/test_lr_schedule.py b/tests/ut/python/nn/optim/test_lr_schedule.py
index 69fa8a356f..d3c9ab2aab 100644
--- a/tests/ut/python/nn/optim/test_lr_schedule.py
+++ b/tests/ut/python/nn/optim/test_lr_schedule.py
@@ -22,7 +22,7 @@ from mindspore.ops.operations import BiasAdd, MatMul
 import mindspore.ops.composite as C

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 class Net(Cell):
diff --git a/tests/ut/python/nn/test_nn_pad.py b/tests/ut/python/nn/test_nn_pad.py
index 5e0f7108d6..3ea28aefc0 100644
--- a/tests/ut/python/nn/test_nn_pad.py
+++ b/tests/ut/python/nn/test_nn_pad.py
@@ -34,7 +34,7 @@ class Net(nn.Cell):
 class Grad(nn.Cell):
     def __init__(self, network):
         super(Grad, self).__init__()
-        self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = GradOperation(get_all=True, sens_param=True)
         self.network = network

     @ms_function
diff --git a/tests/ut/python/ops/test_bprop_disorder.py b/tests/ut/python/ops/test_bprop_disorder.py
index c228c768d7..7f1829d5e7 100644
--- a/tests/ut/python/ops/test_bprop_disorder.py
+++ b/tests/ut/python/ops/test_bprop_disorder.py
@@ -28,7 +28,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
     import pipeline_for_compile_forward_ge_graph_for_case_by_case_config

-grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True)
+grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True)

 class DisOrderTest1(nn.Cell):
diff --git a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py
index 7784ab1e0d..00c653ce88 100644
--- a/tests/ut/python/ops/test_control_ops.py
+++ b/tests/ut/python/ops/test_control_ops.py
@@ -30,9 +30,9 @@ from mindspore.common import ms_function
 context.set_context(mode=context.GRAPH_MODE)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

 def cond_data_test(x_init, y_init):
@@ -564,7 +564,7 @@ def test_switch_layer_env_eliminate():
 class NetGrad(nn.Cell):
     def __init__(self, net):
         super(NetGrad, self).__init__()
-        self.grad_op = C.GradOperation('grad', get_by_list=True, sens_param=False)
+        self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
         self.net = net
         self.weights = ParameterTuple(self.net.trainable_params())
@@ -593,7 +593,7 @@ def test_switch_layer_single_layer():
 class NetGrad(nn.Cell):
     def __init__(self, net):
         super(NetGrad, self).__init__()
-        self.grad_op = C.GradOperation('grad', get_by_list=True, sens_param=False)
+        self.grad_op = C.GradOperation(get_by_list=True, sens_param=False)
         self.net = net
         self.weights = ParameterTuple(self.net.trainable_params())
diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py
index 2eeed81eaf..1113d9eeb6 100755
--- a/tests/ut/python/ops/test_math_ops.py
+++ b/tests/ut/python/ops/test_math_ops.py
@@ -38,7 +38,7 @@ context.set_context(mode=context.GRAPH_MODE)
 # W0613: unused-argument
 # W0231: super-init-not-called

-grad = C.GradOperation('grad')
+grad = C.GradOperation()

 def test_multiply():
     """ test_multiply """
diff --git a/tests/ut/python/ops/test_momentum.py b/tests/ut/python/ops/test_momentum.py
index ddc22b65d0..983ea9fe7d 100644
--- a/tests/ut/python/ops/test_momentum.py
+++ b/tests/ut/python/ops/test_momentum.py
@@ -34,7 +34,7 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \
 run_opt = C.MultitypeFuncGraph("run_opt")

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 @run_opt.register("Function", "Tensor", "Tensor", "Tensor",
diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py
index a6541466c1..abf0b034d3 100644
--- a/tests/ut/python/ops/test_nn_ops.py
+++ b/tests/ut/python/ops/test_nn_ops.py
@@ -45,8 +45,8 @@ def conv1x1(in_channels, out_channels, stride=1, padding=0):
                      kernel_size=1, stride=stride, padding=padding)

-grad = C.GradOperation('grad')
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

 class ResidualBlock(nn.Cell):
@@ -230,7 +230,7 @@ class FusedBatchNormGrad(nn.Cell):
     def __init__(self, network):
         super(FusedBatchNormGrad, self).__init__()
-        self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
+        self.grad = C.GradOperation(get_all=True, sens_param=True)
         self.network = network

     def construct(self, inp, output_grad):
diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py
index 17af407b1f..dbb23db167 100755
--- a/tests/ut/python/ops/test_ops.py
+++ b/tests/ut/python/ops/test_ops.py
@@ -36,7 +36,7 @@ from ....mindspore_test_framework.pipeline.gradient.compile_gradient \
 from ....ops_common import convert

-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

 class InputBackward(nn.Cell):
diff --git a/tests/ut/python/parallel/test_add_relu_redistribution.py b/tests/ut/python/parallel/test_add_relu_redistribution.py
index f8e211ae1a..894c29a340 100644
--- a/tests/ut/python/parallel/test_add_relu_redistribution.py
+++ b/tests/ut/python/parallel/test_add_relu_redistribution.py
@@ -23,7 +23,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class AddRelu(nn.Cell):
diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py
index 1b307d5733..4d2b623dd5 100644
--- a/tests/ut/python/parallel/test_arithmetic.py
+++ b/tests/ut/python/parallel/test_arithmetic.py
@@ -23,7 +23,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_attention.py b/tests/ut/python/parallel/test_attention.py
index 25c8be5e9c..7af99af2bd 100644
--- a/tests/ut/python/parallel/test_attention.py
+++ b/tests/ut/python/parallel/test_attention.py
@@ -27,7 +27,7 @@ from mindspore.common.parameter import Parameter
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
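From here on, the parallel test files all share one scaffold: a NetWithLoss cell reduces the network output so the backward pass has a single seed, and a GradWrap cell applies the module-level grad_all to it. A sketch of that scaffold under stated assumptions (the real tests use VirtualLoss from test_math_ops; P.ReduceSum stands in below so the example runs standalone, and MatMulNet is illustrative):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor, context
    from mindspore.ops import composite as C
    from mindspore.ops import operations as P

    context.set_context(mode=context.GRAPH_MODE)

    grad_all = C.GradOperation(get_all=True)

    class NetWithLoss(nn.Cell):
        def __init__(self, network):
            super(NetWithLoss, self).__init__()
            self.network = network
            self.loss = P.ReduceSum()  # stand-in for VirtualLoss

        def construct(self, x, y):
            return self.loss(self.network(x, y))

    class GradWrap(nn.Cell):
        def __init__(self, network):
            super(GradWrap, self).__init__()
            self.network = network

        def construct(self, x, y):
            return grad_all(self.network)(x, y)

    class MatMulNet(nn.Cell):
        def __init__(self):
            super(MatMulNet, self).__init__()
            self.matmul = P.MatMul()

        def construct(self, x, y):
            return self.matmul(x, y)

    net = GradWrap(NetWithLoss(MatMulNet()))
    x = Tensor(np.ones([8, 8]).astype(np.float32))
    y = Tensor(np.ones([8, 8]).astype(np.float32))
    dx, dy = net(x, y)  # gradients for both inputs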
diff --git a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
index 32f6420061..4f9cd92c3c 100644
--- a/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
+++ b/tests/ut/python/parallel/test_auto_parallel_BN_PReLU.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
index 0a8afcb6fb..2d25f18081 100644
--- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
+++ b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
@@ -27,7 +27,7 @@ from tests.ut.python.ops.test_math_ops import VirtualLoss
 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
index 4a73ce6d7c..3c3cd40abb 100644
--- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
+++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
@@ -25,7 +25,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py
index 14ec846c9d..0e498878eb 100644
--- a/tests/ut/python/parallel/test_auto_parallel_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_cast.py
@@ -26,7 +26,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
index c330d6259e..9ab8b27406 100644
--- a/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
+++ b/tests/ut/python/parallel/test_auto_parallel_common_parameter.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_sources.py b/tests/ut/python/parallel/test_auto_parallel_double_sources.py
index 2f89606974..6ad7858505 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_sources.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_sources.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_star.py b/tests/ut/python/parallel/test_auto_parallel_double_star.py
index 08b13a6bbe..5a43159993 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_star.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_star.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
index eb9c397abc..70443858aa 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
@@ -85,8 +85,8 @@ class TrainStepWarp(nn.Cell):
         self.optimizer_d = Adam(self.weights_d, learning_rate=3.5e-4, eps=1e-8, loss_scale=sens)
         self.hyper_map = C.HyperMap()
-        self.grad_w = C.GradOperation('grad_w', get_by_list=True, sens_param=True)
-        self.grad_d = C.GradOperation('grad_d', get_by_list=True, sens_param=True)
+        self.grad_w = C.GradOperation(get_by_list=True, sens_param=True)
+        self.grad_d = C.GradOperation(get_by_list=True, sens_param=True)
         self.sens = sens
         self.loss_net_w = IthOutputCell(network, output_index=0)
         self.loss_net_d = IthOutputCell(network, output_index=1)
diff --git a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
index c633933013..05e57801c0 100644
--- a/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
+++ b/tests/ut/python/parallel/test_auto_parallel_fc_nobias.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
index 2eb8243a02..c005fcffde 100644
--- a/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_four_matmul.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
index 1601a99c36..1a1c1502f3 100644
--- a/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
+++ b/tests/ut/python/parallel/test_auto_parallel_l2normalize.py
@@ -25,7 +25,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
index d8a89be72f..738614ab5e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
index 9d7635dd02..bc086c5907 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
@@ -26,7 +26,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_onehot.py b/tests/ut/python/parallel/test_auto_parallel_onehot.py
index f36eb5e109..302de23a50 100644
--- a/tests/ut/python/parallel/test_auto_parallel_onehot.py
+++ b/tests/ut/python/parallel/test_auto_parallel_onehot.py
@@ -30,7 +30,7 @@ from tests.ut.python.ops.test_math_ops import VirtualLoss
 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class Dataset(MindData):
diff --git a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
index 0c08d6a482..4aa2fe6b8d 100644
--- a/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
+++ b/tests/ut/python/parallel/test_auto_parallel_partial_strategy.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
index 337eeff49b..415ddf94d0 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reduce_method.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py
index 2d9ef96b42..2f4c4efb6e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_reshape.py
+++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_rhombus.py b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
index e83eebf648..fb7b6caf6e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_rhombus.py
+++ b/tests/ut/python/parallel/test_auto_parallel_rhombus.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
index cf20ca0ef5..448e322c2a 100644
--- a/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
+++ b/tests/ut/python/parallel/test_auto_parallel_softmax_loss.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_transformer.py b/tests/ut/python/parallel/test_auto_parallel_transformer.py
index c3ea02eaa9..4a3d8daa44 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transformer.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transformer.py
@@ -23,7 +23,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py
index 246ab2d588..b542004ea7 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transpose.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py
@@ -25,7 +25,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
index ba1ffd6cd8..8ed66b958e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
+++ b/tests/ut/python/parallel/test_auto_parallel_tuple_depend.py
@@ -25,7 +25,7 @@ from mindspore.ops.operations.comm_ops import _VirtualDataset
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
index 2a4d4f40f6..f4863cce1c 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
@@ -27,7 +27,7 @@ from mindspore.parallel._utils import _reset_op_id as reset_op_id
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
index d5f6086c80..a05730aa5e 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_partial_matmul.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
index 77c85cb271..14affccf50 100644
--- a/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
+++ b/tests/ut/python/parallel/test_auto_parallel_zig_zag.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_auto_star_elimination.py b/tests/ut/python/parallel/test_auto_star_elimination.py
index 8ab81ccc2c..7b1945304e 100644
--- a/tests/ut/python/parallel/test_auto_star_elimination.py
+++ b/tests/ut/python/parallel/test_auto_star_elimination.py
@@ -26,7 +26,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel.py b/tests/ut/python/parallel/test_batch_parallel.py
index bd823bd416..db0c93dbf9 100644
--- a/tests/ut/python/parallel/test_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batch_parallel.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel_dropout.py b/tests/ut/python/parallel/test_batch_parallel_dropout.py
index ce76194460..ba9c1a6933 100644
--- a/tests/ut/python/parallel/test_batch_parallel_dropout.py
+++ b/tests/ut/python/parallel/test_batch_parallel_dropout.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
index 54cf437e44..a81079e8ea 100644
--- a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
+++ b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py
index 56bc8888bb..b56a08ec51 100644
--- a/tests/ut/python/parallel/test_comparison_function_info.py
+++ b/tests/ut/python/parallel/test_comparison_function_info.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_dataset_interface.py b/tests/ut/python/parallel/test_dataset_interface.py
index 46114cc0bd..0e70b2513c 100644
--- a/tests/ut/python/parallel/test_dataset_interface.py
+++ b/tests/ut/python/parallel/test_dataset_interface.py
@@ -107,7 +107,7 @@ class TrainOneStepCell(nn.Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True)
+        self.grad = C.GradOperation(get_by_list=True, sens_param=True)

     def construct(self, data, sens):
         weights = self.weights
diff --git a/tests/ut/python/parallel/test_different_type_for_div_op.py b/tests/ut/python/parallel/test_different_type_for_div_op.py
index 1dcdcac860..92480d06b0 100644
--- a/tests/ut/python/parallel/test_different_type_for_div_op.py
+++ b/tests/ut/python/parallel/test_different_type_for_div_op.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py
index 450914eb69..668618fcab 100644
--- a/tests/ut/python/parallel/test_element_wise_function.py
+++ b/tests/ut/python/parallel/test_element_wise_function.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_embeddinglookup.py b/tests/ut/python/parallel/test_embeddinglookup.py
index f131a85457..576a6b3bc9 100644
--- a/tests/ut/python/parallel/test_embeddinglookup.py
+++ b/tests/ut/python/parallel/test_embeddinglookup.py
@@ -23,7 +23,7 @@ from mindspore import Tensor, context
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gather_v2.py b/tests/ut/python/parallel/test_gather_v2.py
index e914af3102..2d657a6101 100644
--- a/tests/ut/python/parallel/test_gather_v2.py
+++ b/tests/ut/python/parallel/test_gather_v2.py
@@ -23,7 +23,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py
index e6f269e2db..4c0534aad0 100644
--- a/tests/ut/python/parallel/test_gather_v2_primitive.py
+++ b/tests/ut/python/parallel/test_gather_v2_primitive.py
@@ -109,8 +109,7 @@ class TrainOneStepCell(Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = sens
diff --git a/tests/ut/python/parallel/test_get_next.py b/tests/ut/python/parallel/test_get_next.py
index 956723a3f3..c1db710ad5 100644
--- a/tests/ut/python/parallel/test_get_next.py
+++ b/tests/ut/python/parallel/test_get_next.py
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
 context.set_context(mode=context.GRAPH_MODE)

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_gpu_dropout.py b/tests/ut/python/parallel/test_gpu_dropout.py
index 1a1ac55301..0eade2b962 100644
--- a/tests/ut/python/parallel/test_gpu_dropout.py
+++ b/tests/ut/python/parallel/test_gpu_dropout.py
@@ -23,7 +23,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_hybird_parallel_activation.py b/tests/ut/python/parallel/test_hybird_parallel_activation.py
index 32596ab44a..cf2dd849cf 100644
--- a/tests/ut/python/parallel/test_hybird_parallel_activation.py
+++ b/tests/ut/python/parallel/test_hybird_parallel_activation.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_l2normalize.py b/tests/ut/python/parallel/test_l2normalize.py
index 9a842c603a..8a26bf3943 100644
--- a/tests/ut/python/parallel/test_l2normalize.py
+++ b/tests/ut/python/parallel/test_l2normalize.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_linear.py b/tests/ut/python/parallel/test_linear.py
index 4c0df3c665..b0fd410566 100644
--- a/tests/ut/python/parallel/test_linear.py
+++ b/tests/ut/python/parallel/test_linear.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_loop_two_matmul.py b/tests/ut/python/parallel/test_loop_two_matmul.py
index e05c84fca4..5b066d53a8 100644
--- a/tests/ut/python/parallel/test_loop_two_matmul.py
+++ b/tests/ut/python/parallel/test_loop_two_matmul.py
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_loss_scale.py b/tests/ut/python/parallel/test_loss_scale.py
index 7737fe1271..498a83d928 100644
--- a/tests/ut/python/parallel/test_loss_scale.py
+++ b/tests/ut/python/parallel/test_loss_scale.py
@@ -61,8 +61,7 @@ class TrainOneStepWithLossScaleCell(nn.Cell):
         self.network = network
         self.weights = optimizer.parameters
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.reducer_flag = False
         self.grad_reducer = F.identity
diff --git a/tests/ut/python/parallel/test_matmul_dropout.py b/tests/ut/python/parallel/test_matmul_dropout.py
index 41892ade40..5dfa4cabb3 100644
--- a/tests/ut/python/parallel/test_matmul_dropout.py
+++ b/tests/ut/python/parallel/test_matmul_dropout.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_matmul_tensor.py b/tests/ut/python/parallel/test_matmul_tensor.py
index 757242bf09..aff6cfca73 100644
--- a/tests/ut/python/parallel/test_matmul_tensor.py
+++ b/tests/ut/python/parallel/test_matmul_tensor.py
@@ -26,7 +26,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
index 4790f60c99..81bb9cae7b 100644
--- a/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
+++ b/tests/ut/python/parallel/test_mix_precision_hybrid_parallel.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py
index a67010143d..33c8fcbc82 100644
--- a/tests/ut/python/parallel/test_one_hot_net.py
+++ b/tests/ut/python/parallel/test_one_hot_net.py
@@ -30,7 +30,7 @@ from tests.dataset_mock import MindData
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 device_num = 16
diff --git a/tests/ut/python/parallel/test_one_weight_parameter.py b/tests/ut/python/parallel/test_one_weight_parameter.py
index 558d2ec322..8cf6b6aa8e 100644
--- a/tests/ut/python/parallel/test_one_weight_parameter.py
+++ b/tests/ut/python/parallel/test_one_weight_parameter.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_onehot.py b/tests/ut/python/parallel/test_onehot.py
index e65871e55b..725e9e33ec 100644
--- a/tests/ut/python/parallel/test_onehot.py
+++ b/tests/ut/python/parallel/test_onehot.py
@@ -26,7 +26,7 @@ from mindspore.ops.operations.comm_ops import _VirtualDataset
 context.set_context(mode=context.GRAPH_MODE)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_prelu.py b/tests/ut/python/parallel/test_prelu.py
index 6afef01f5c..e60aafeba0 100644
--- a/tests/ut/python/parallel/test_prelu.py
+++ b/tests/ut/python/parallel/test_prelu.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_reduce_method_info.py b/tests/ut/python/parallel/test_reduce_method_info.py
index ecf3dc3094..07712a2d9d 100644
--- a/tests/ut/python/parallel/test_reduce_method_info.py
+++ b/tests/ut/python/parallel/test_reduce_method_info.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLossNoBias(nn.Cell):
diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py
index 070e874119..9cfb376e1b 100644
--- a/tests/ut/python/parallel/test_reshape.py
+++ b/tests/ut/python/parallel/test_reshape.py
@@ -36,7 +36,7 @@ context.set_context(mode=context.GRAPH_MODE)
 context.reset_auto_parallel_context()

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class Dataset(MindData):
@@ -419,8 +419,7 @@ class TrainOneStepCell(nn.Cell):
         self.network.add_flags(defer_inline=True)
         self.weights = ParameterTuple(network.trainable_params())
         self.optimizer = optimizer
-        self.grad = C.GradOperation('grad',
-                                    get_by_list=True,
+        self.grad = C.GradOperation(get_by_list=True,
                                     sens_param=True)
         self.sens = sens
diff --git a/tests/ut/python/parallel/test_reshape_parameter.py b/tests/ut/python/parallel/test_reshape_parameter.py
index 4dfaa89ba2..f074f566f1 100644
--- a/tests/ut/python/parallel/test_reshape_parameter.py
+++ b/tests/ut/python/parallel/test_reshape_parameter.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_scalar_loss.py b/tests/ut/python/parallel/test_scalar_loss.py
index 3d07cd035d..0f8dcc03f8 100644
--- a/tests/ut/python/parallel/test_scalar_loss.py
+++ b/tests/ut/python/parallel/test_scalar_loss.py
@@ -24,7 +24,7 @@ from mindspore.ops import functional as F
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
index b25d86a6e4..95e642cf3d 100644
--- a/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
+++ b/tests/ut/python/parallel/test_semi_auto_two_subgraphs.py
@@ -83,9 +83,9 @@ class TrainStepWrap(nn.Cell):
         self.optimizer_d = Adam(self.weights_d, learning_rate=3.5e-4, eps=1e-8,
                                 loss_scale=sens)
         self.hyper_map = C.HyperMap()
-        self.grad_w = C.GradOperation('grad_w', get_by_list=True,
+        self.grad_w = C.GradOperation(get_by_list=True,
                                       sens_param=True)
-        self.grad_d = C.GradOperation('grad_d', get_by_list=True,
+        self.grad_d = C.GradOperation(get_by_list=True,
                                       sens_param=True)
         self.sens = sens
         self.loss_net_w = IthOutputCell(network, output_index=0)
diff --git a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
index cf4ee1710c..24b45600ba 100644
--- a/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
+++ b/tests/ut/python/parallel/test_softmax_cross_entropy_loss.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_sparse_feature_bprop.py b/tests/ut/python/parallel/test_sparse_feature_bprop.py
index 73d8097605..f7de90d9cd 100644
--- a/tests/ut/python/parallel/test_sparse_feature_bprop.py
+++ b/tests/ut/python/parallel/test_sparse_feature_bprop.py
@@ -26,7 +26,7 @@ from mindspore.common.api import _executor
 from mindspore.nn import TrainOneStepCell, Adam

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_sparse_gather_v2.py b/tests/ut/python/parallel/test_sparse_gather_v2.py
index 1f3d70134c..30957dcf9d 100644
--- a/tests/ut/python/parallel/test_sparse_gather_v2.py
+++ b/tests/ut/python/parallel/test_sparse_gather_v2.py
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_split_grad_sens.py b/tests/ut/python/parallel/test_split_grad_sens.py
index 3c94728356..a181a1858a 100644
--- a/tests/ut/python/parallel/test_split_grad_sens.py
+++ b/tests/ut/python/parallel/test_split_grad_sens.py
@@ -24,8 +24,8 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_step_parallel.py b/tests/ut/python/parallel/test_step_parallel.py
index fce960ed95..a03a151e13 100644
--- a/tests/ut/python/parallel/test_step_parallel.py
+++ b/tests/ut/python/parallel/test_step_parallel.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_strategy_checkpoint.py b/tests/ut/python/parallel/test_strategy_checkpoint.py
index ada3246307..2d39b7aae6 100644
--- a/tests/ut/python/parallel/test_strategy_checkpoint.py
+++ b/tests/ut/python/parallel/test_strategy_checkpoint.py
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 # model_parallel test
diff --git a/tests/ut/python/parallel/test_sum_as_loss.py b/tests/ut/python/parallel/test_sum_as_loss.py
index c8cd5e63ae..bca26d0b2c 100644
--- a/tests/ut/python/parallel/test_sum_as_loss.py
+++ b/tests/ut/python/parallel/test_sum_as_loss.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
diff --git a/tests/ut/python/parallel/test_two_matmul.py b/tests/ut/python/parallel/test_two_matmul.py
index 0e6b47286c..854df0ca71 100644
--- a/tests/ut/python/parallel/test_two_matmul.py
+++ b/tests/ut/python/parallel/test_two_matmul.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_two_weights_parameter.py b/tests/ut/python/parallel/test_two_weights_parameter.py
index 160ae9f40d..c05fa63c2d 100644
--- a/tests/ut/python/parallel/test_two_weights_parameter.py
+++ b/tests/ut/python/parallel/test_two_weights_parameter.py
@@ -23,7 +23,7 @@ from mindspore.ops import composite as C
 from mindspore.ops import operations as P

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)

 class NetWithLoss(nn.Cell):
diff --git a/tests/ut/python/parallel/test_virtual_dataset_3_input.py b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
index 63979e59f8..a3b2f8d96b 100644
--- a/tests/ut/python/parallel/test_virtual_dataset_3_input.py
+++ b/tests/ut/python/parallel/test_virtual_dataset_3_input.py
@@ -26,7 +26,7 @@ from mindspore.ops.operations.comm_ops import _VirtualDataset
 from tests.ut.python.ops.test_math_ops import VirtualLoss

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class NetWithLoss(nn.Cell):
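The parameter_feature tests below keep all the flavors side by side, which makes the semantics easy to read off: a bare GradOperation() differentiates with respect to the first input only, get_all widens that to every input, get_by_list switches to parameters, and sens_param adds the explicit seed. A small sketch of the bare form, assuming PyNative mode (the scale function is illustrative):

    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import composite as C

    context.set_context(mode=context.PYNATIVE_MODE)

    def scale(x, y):
        return x * y

    grad_first = C.GradOperation()  # no flags: d(out)/d(first input) only

    x = Tensor(np.array([1.0, 2.0]).astype(np.float32))
    y = Tensor(np.array([3.0, 4.0]).astype(np.float32))
    dx = grad_first(scale)(x, y)  # equals y; dy is not returned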
C.GradOperation('get_all', get_all=True) -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) +grad_all = C.GradOperation(get_all=True) +grad_all_with_sens = C.GradOperation(sens_param=True) def test_parser_three_default_mixed_args_subnet(): diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index 760dd8531a..6b3d05a978 100644 --- a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -25,11 +25,11 @@ from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE, save_graphs=True) -grad_by_list = C.GradOperation('get_by_list', get_by_list=True) -grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) -grad_by_list_with_sens = C.GradOperation('grad_by_list_with_sens', get_by_list=True, sens_param=True) -grad_all = C.GradOperation('get_all', get_all=True) -grad_with_sens = C.GradOperation('grad_with_sens', sens_param=True) +grad_by_list = C.GradOperation(get_by_list=True) +grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) +grad_by_list_with_sens = C.GradOperation(get_by_list=True, sens_param=True) +grad_all = C.GradOperation(get_all=True) +grad_with_sens = C.GradOperation(sens_param=True) def test_net_vargs_expand(): @@ -200,7 +200,7 @@ def test_grad_with_param_sens(): self.weights = ParameterTuple(net.trainable_params()) self.net = net self.sens = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), name='sens', requires_grad=False) - self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) + self.grad = C.GradOperation(get_by_list=True, sens_param=True) def construct(self, x, y): return self.grad(self.net, self.weights)(x, y, self.sens) @@ -290,8 +290,7 @@ def test_grad_within_if_else(): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net - grad_op = C.GradOperation( - name='grad', get_all=False, get_by_list=True, sens_param=True) + grad_op = C.GradOperation(get_all=False, get_by_list=True, sens_param=True) sens = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) self.grad = Bprop(self.net, True, self.weights, grad_op, sens) @@ -312,8 +311,7 @@ def test_grad_for_concat(): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net - grad_op = C.GradOperation( - name='grad', get_all=True, get_by_list=False, sens_param=True) + grad_op = C.GradOperation(get_all=True, get_by_list=False, sens_param=True) self.grad = Bprop(self.net, False, self.weights, grad_op) def construct(self, *inputs): diff --git a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py index 027cfdb779..a775b82236 100644 --- a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py +++ b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py @@ -23,7 +23,7 @@ from mindspore.ops import composite as C from mindspore.ops.operations import TensorAdd context.set_context(mode=context.GRAPH_MODE) -grad = C.GradOperation('get_all', get_all=True, sens_param=True) +grad = C.GradOperation(get_all=True, sens_param=True) class TensorAddNetMe(Cell): diff --git a/tests/ut/python/pipeline/parse/test_parse.py b/tests/ut/python/pipeline/parse/test_parse.py index 8bafdf26c7..b5d0fb0ae4 100644 --- a/tests/ut/python/pipeline/parse/test_parse.py +++ b/tests/ut/python/pipeline/parse/test_parse.py @@ -37,7 +37,7 @@ from ...ut_filter import 
 # W0613: unused-argument

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 log = logging.getLogger("test")
diff --git a/tests/ut/python/pynative_mode/ops/test_grad.py b/tests/ut/python/pynative_mode/ops/test_grad.py
index d0bf9c893f..1e8849cc97 100644
--- a/tests/ut/python/pynative_mode/ops/test_grad.py
+++ b/tests/ut/python/pynative_mode/ops/test_grad.py
@@ -29,8 +29,8 @@ def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE)


-grad = C.GradOperation('grad')
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def mul(x, y):
diff --git a/tests/ut/python/pynative_mode/test_cont_cases.py b/tests/ut/python/pynative_mode/test_cont_cases.py
index 518678244a..5c7350ed56 100644
--- a/tests/ut/python/pynative_mode/test_cont_cases.py
+++ b/tests/ut/python/pynative_mode/test_cont_cases.py
@@ -26,8 +26,8 @@ from mindspore.ops import operations as P
 # from tests.vm_impl.vm_interface import *
 # from tests.vm_impl import *

-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module():
diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py
index 7e19b7452a..ab5a80bbb7 100644
--- a/tests/ut/python/pynative_mode/test_framstruct.py
+++ b/tests/ut/python/pynative_mode/test_framstruct.py
@@ -35,10 +35,10 @@ def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE)


-grad = C.GradOperation('grad')
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all = C.GradOperation(get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 @ms_function
diff --git a/tests/ut/python/pynative_mode/test_high_order_grad.py b/tests/ut/python/pynative_mode/test_high_order_grad.py
index 133f65c728..e41df500c3 100644
--- a/tests/ut/python/pynative_mode/test_high_order_grad.py
+++ b/tests/ut/python/pynative_mode/test_high_order_grad.py
@@ -18,9 +18,9 @@ from mindspore.common.api import ms_function
 import mindspore.ops.composite as C


-grad = C.GradOperation('grad')
-grad_all = C.GradOperation('get_all', get_all=True)
-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad = C.GradOperation()
+grad_all = C.GradOperation(get_all=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def setup_module(module):
     context.set_context(mode=context.PYNATIVE_MODE, check_bprop=False)
diff --git a/tests/ut/python/pynative_mode/test_hook.py b/tests/ut/python/pynative_mode/test_hook.py
index 532fcbebec..6c2204f381 100644
--- a/tests/ut/python/pynative_mode/test_hook.py
+++ b/tests/ut/python/pynative_mode/test_hook.py
@@ -28,7 +28,7 @@ var_hook_done = False
 cell_bprop_done = False


-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)


 def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
@@ -135,7 +135,7 @@ class GradWrap(nn.Cell):

     def construct(self, x, label):
         weights = self.weights
-        return C.GradOperation('get_by_list', get_by_list=True)(self.network, weights)(x, label)
+        return C.GradOperation(get_by_list=True)(self.network, weights)(x, label)


 def test_hook():
diff --git a/tests/ut/python/pynative_mode/test_implicit_conversion.py b/tests/ut/python/pynative_mode/test_implicit_conversion.py
index b7c6144d22..39c885bd66 100644
--- a/tests/ut/python/pynative_mode/test_implicit_conversion.py
+++ b/tests/ut/python/pynative_mode/test_implicit_conversion.py
@@ -20,7 +20,7 @@ from mindspore import Tensor, nn
 from mindspore.ops import composite as C


-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def test_float_tensor_and_int_add():
diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py
index ee84677472..5573517b1f 100644
--- a/tests/ut/python/pynative_mode/test_insert_grad_of.py
+++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py
@@ -26,8 +26,8 @@ from ....mindspore_test_framework.utils.bprop_util import bprop
 from ....mindspore_test_framework.utils.debug_util import PrintShapeTypeCell, PrintGradShapeTypeCell


-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_kw_and_kwarg.py b/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
index 0100e0d0fc..3fdbcefc41 100644
--- a/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
+++ b/tests/ut/python/pynative_mode/test_kw_and_kwarg.py
@@ -64,7 +64,7 @@ def test_kw_grad():
         def __init__(self, net):
             super(GradKwNet, self).__init__()
             self.net = net
-            self.grad_all_wit_sense = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+            self.grad_all_wit_sense = C.GradOperation(get_all=True, sens_param=True)

         def construct(self, x, y, *arg, **kwargs):
             return self.grad_all_wit_sense(self.net)(x, y, *arg, **kwargs)
@@ -112,7 +112,7 @@ def test_grad():
         def __init__(self, net):
             super(GradNet, self).__init__()
             self.net = net
-            self.grad_all_wit_sense = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+            self.grad_all_wit_sense = C.GradOperation(get_all=True, sens_param=True)

         def construct(self, x, y, z, sens):
             return self.grad_all_wit_sense(self.net)(x, y, z, sens)
diff --git a/tests/ut/python/pynative_mode/test_pynative_model.py b/tests/ut/python/pynative_mode/test_pynative_model.py
index 521f25a301..a0469cdaf4 100644
--- a/tests/ut/python/pynative_mode/test_pynative_model.py
+++ b/tests/ut/python/pynative_mode/test_pynative_model.py
@@ -24,7 +24,7 @@ from mindspore.ops import operations as P
 from ..ut_filter import non_graph_engine


-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
+grad_by_list = C.GradOperation(get_by_list=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_sparse_pynative.py b/tests/ut/python/pynative_mode/test_sparse_pynative.py
index 4d9db16cb7..3568491b23 100644
--- a/tests/ut/python/pynative_mode/test_sparse_pynative.py
+++ b/tests/ut/python/pynative_mode/test_sparse_pynative.py
@@ -26,7 +26,7 @@ from mindspore.ops import composite as C

 context.set_context(mode=context.PYNATIVE_MODE, enable_sparse=True)

-grad_all = C.GradOperation('get_all', get_all=True)
+grad_all = C.GradOperation(get_all=True)

 class GradWrap(nn.Cell):
     def __init__(self, network):
         super(GradWrap, self).__init__()
diff --git a/tests/ut/python/pynative_mode/test_stop_gradient.py b/tests/ut/python/pynative_mode/test_stop_gradient.py
index d880aa7b17..59ae8d6429 100644
--- a/tests/ut/python/pynative_mode/test_stop_gradient.py
+++ b/tests/ut/python/pynative_mode/test_stop_gradient.py
@@ -31,8 +31,8 @@ from ..ut_filter import non_graph_engine
 from ....mindspore_test_framework.utils.bprop_util import bprop


-grad_by_list = C.GradOperation('get_by_list', get_by_list=True)
-grad_all = C.GradOperation('get_all', get_all=True)
+grad_by_list = C.GradOperation(get_by_list=True)
+grad_all = C.GradOperation(get_all=True)


 def setup_module(module):
diff --git a/tests/ut/python/pynative_mode/test_user_define_bprop_check.py b/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
index 6ebe94aceb..fe76611cfe 100644
--- a/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
+++ b/tests/ut/python/pynative_mode/test_user_define_bprop_check.py
@@ -21,7 +21,7 @@ from mindspore import dtype as mstype
 from mindspore.ops import composite as C


-grad_all_with_sens = C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True)
+grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)


 def test_user_define_bprop_check_ok():
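Every hunk above applies the same mechanical rule: drop the leading name argument of C.GradOperation (whether positional, as in C.GradOperation('get_all', get_all=True), or keyword, as in name='grad') and keep the behavioral keywords get_all, get_by_list, and sens_param unchanged. A minimal sketch of a call site before and after the patch; the toy Net cell and the input values are illustrative only and do not appear in this patch:

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, context
from mindspore.ops import composite as C
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE)


class Net(nn.Cell):
    """Toy cell standing in for the test networks above (illustrative only)."""
    def __init__(self):
        super(Net, self).__init__()
        self.mul = P.Mul()

    def construct(self, x, y):
        return self.mul(x, y)


# Before this patch: grad_all = C.GradOperation('get_all', get_all=True)
# After: the name argument is gone; the keyword arguments stay as they were.
grad_all = C.GradOperation(get_all=True)

net = Net()
x = Tensor(np.ones([2, 2]).astype(np.float32))
y = Tensor(np.ones([2, 2]).astype(np.float32))
dx, dy = grad_all(net)(x, y)  # gradients with respect to both inputs

The sens_param variants follow the same substitution: C.GradOperation('grad_all_with_sens', get_all=True, sens_param=True) becomes C.GradOperation(get_all=True, sens_param=True), with the sensitivity tensor still passed as the extra trailing input at call time.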