From 9ec100d06928da5d13496e206ad9789ed1080ec1 Mon Sep 17 00:00:00 2001
From: l00591931
Date: Mon, 1 Feb 2021 10:28:27 +0800
Subject: [PATCH] Change TensorAdd to Add, from r1.1 to master

---
 config/op_info.config | 6 +-
 .../graph_kernel/expanders/bias_add.py | 10 +-
 .../graph_kernel/expanders/fused_adam.py | 6 +-
 .../expanders/fused_adam_weight_decay.py | 8 +-
 .../_extends/graph_kernel/expanders/gelu.py | 4 +-
 .../graph_kernel/expanders/gelu_grad.py | 8 +-
 .../graph_kernel/expanders/layernorm.py | 4 +-
 .../graph_kernel/expanders/layernorm_grad.py | 8 +-
 .../_extends/graph_kernel/model/model.py | 2 +-
 .../cpu/arithmetic_cpu_kernel.cc | 2 +-
 .../cpu/mkldnn/tensoradd_cpu_kernel.h | 3 +-
 .../gpu/math/broadcast_gpu_kernel.cc | 10 +-
 .../gpu/math/broadcast_gpu_kernel.h | 2 +-
 .../kernel_compiler/tbe/tbe_kernel_build.cc | 2 +-
 .../bnupdate_eltwise_eltwise_fusion_pass.cc | 2 +-
 .../ascend/ir_fusion/adam_apply_one_fusion.cc | 20 ++--
 .../ascend/ir_fusion/adam_apply_one_fusion.h | 4 +-
 .../adam_apply_one_with_decay_rule.cc | 40 ++++----
 .../adam_apply_one_with_decay_rule.h | 4 +-
 .../ascend/ir_fusion/lamb_next_mv_rule.cc | 24 ++---
 .../ascend/ir_fusion/lamb_next_mv_rule.h | 4 +-
 .../ir_fusion/lamb_next_mv_with_decay_rule.cc | 32 +++----
 .../ir_fusion/lamb_next_mv_with_decay_rule.h | 4 +-
 .../lamb_next_mv_with_decay_v1_rule.cc | 12 +--
 .../ascend/ir_fusion/lamb_next_right_rule.cc | 2 +-
 .../ascend/ir_fusion/lamb_next_right_rule.h | 2 +-
 .../ascend/ir_fusion/mul_add_fusion.cc | 2 +-
 .../backend/optimizer/gpu/adam_fusion.cc | 10 +-
 .../optimizer/gpu/adam_weight_decay_fusion.cc | 12 +--
 .../optimizer/gpu/add_relu_grad_v2_fusion.cc | 2 +-
 .../optimizer/gpu/add_relu_v2_fusion.cc | 2 +-
 .../gpu/batch_norm_add_relu_fusion.cc | 2 +-
 .../optimizer/gpu/replace_addn_fusion.cc | 2 +-
 .../graph_kernel/arithmetic_simplify.cc | 10 +-
 .../graph_kernel/graph_kernel_helper.cc | 4 +-
 mindspore/ccsrc/frontend/optimizer/irpass.cc | 2 +-
 .../frontend/optimizer/irpass/merge_addn.h | 4 +-
 .../rec_core/rec_generate_strategy.cc | 2 +-
 .../auto_parallel/rec_core/rec_parse_graph.h | 2 +-
 .../ccsrc/frontend/parallel/dynamic_creator.h | 2 +-
 .../parallel/ops_info/arithmetic_info.h | 7 +-
 .../frontend/parallel/ops_info/onehot_info.cc | 2 +-
 .../frontend/parallel/ops_info/ops_utils.h | 3 +-
 .../frontend/parallel/step_auto_parallel.cc | 2 +-
 .../transform/express_ir/onnx_exporter.cc | 4 +-
 .../elewise_calculation_ops_declare.cc | 2 +-
 mindspore/ccsrc/utils/utils.h | 2 +-
 mindspore/compression/export/quant_export.py | 4 +-
 mindspore/compression/quant/qat.py | 2 +-
 mindspore/core/abstract/infer_functions.h | 4 +-
 mindspore/core/abstract/prim_maths.cc | 4 +-
 .../core/abstract/primitive_infer_map.cc | 2 +-
 mindspore/core/base/core_ops.h | 2 +-
 mindspore/core/c_ops/add.cc | 2 +-
 mindspore/core/ir/pattern_matcher.h | 2 +-
 mindspore/nn/_graph_kernels/graph_kernels.py | 2 +-
 mindspore/nn/layer/activation.py | 2 +-
 mindspore/nn/layer/embedding.py | 6 +-
 mindspore/nn/layer/quant.py | 8 +-
 mindspore/ops/_grad/grad_math_ops.py | 4 +-
 mindspore/ops/_op_impl/akg/ascend/add.py | 6 +-
 mindspore/ops/_op_impl/tbe/tensor_add.py | 4 +-
 mindspore/ops/_op_impl/tbe/tensor_add_ds.py | 2 +-
 mindspore/ops/composite/base.py | 2 +-
 mindspore/ops/functional.py | 2 +-
 mindspore/ops/operations/__init__.py | 3 +-
 mindspore/ops/operations/array_ops.py | 1 +
 mindspore/ops/operations/control_ops.py | 2 +-
 mindspore/ops/operations/debug_ops.py | 8 +-
 mindspore/ops/operations/math_ops.py | 9 +-
 .../official/cv/centerface/src/mobile_v2.py | 4 +-
 model_zoo/official/cv/cnnctc/src/cnn_ctc.py | 2 +-
 .../src/nets/deeplab_v3/deeplab_v3.py | 2 +-
 .../cv/faster_rcnn/src/FasterRcnn/resnet50.py | 2 +-
 .../cv/maskrcnn/src/maskrcnn/resnet50.py | 2 +-
 .../cv/mobilenetv2/src/mobilenetV2.py | 4 +-
 .../cv/mobilenetv2/src/mobilenetV2_fusion.py | 2 +-
 .../cv/mobilenetv2_quant/src/mobilenetV2.py | 2 +-
 .../cv/mobilenetv3/src/mobilenetV3.py | 2 +-
 .../cv/psenet/src/ETSNET/dice_loss.py | 2 +-
 .../official/cv/psenet/src/ETSNET/resnet50.py | 2 +-
 model_zoo/official/cv/resnet/src/resnet.py | 2 +-
 .../cv/resnet/src/resnet_gpu_benchmark.py | 2 +-
 .../cv/resnet50_quant/models/resnet_quant.py | 2 +-
 .../cv/resnet_thor/src/resnet_thor.py | 2 +-
 .../official/cv/resnet_thor/src/thor_layer.py | 4 +-
 .../cv/resnext50/src/backbone/resnet.py | 6 +-
 .../cv/retinaface_resnet50/src/network.py | 2 +-
 .../cv/shufflenetv1/src/shufflenetv1.py | 2 +-
 .../official/cv/squeezenet/src/squeezenet.py | 2 +-
 model_zoo/official/cv/ssd/src/ssd.py | 2 +-
 .../official/cv/xception/src/Xception.py | 2 +-
 .../cv/yolov3_darknet53/src/darknet.py | 2 +-
 .../cv/yolov3_darknet53_quant/src/darknet.py | 2 +-
 .../official/cv/yolov3_resnet18/src/yolov3.py | 2 +-
 .../official/cv/yolov4/src/cspdarknet53.py | 4 +-
 model_zoo/official/nlp/bert/src/bert_model.py | 6 +-
 .../official/nlp/bert_thor/src/bert_model.py | 6 +-
 .../nlp/gnmt_v2/src/gnmt_model/beam_search.py | 4 +-
 .../nlp/mass/src/transformer/beam_search.py | 4 +-
 .../src/transformer/multi_head_attention.py | 2 +-
 .../src/transformer/positional_embedding.py | 2 +-
 .../nlp/mass/src/transformer/residual_conn.py | 2 +-
 .../src/transformer/positional_embedding.py | 2 +-
 .../src/transformer/residual_conn.py | 2 +-
 .../nlp/tinybert/src/tinybert_model.py | 4 +-
 .../nlp/transformer/src/beam_search.py | 4 +-
 .../nlp/transformer/src/transformer_model.py | 6 +-
 .../audio/deepspeech2/src/deepspeech2.py | 2 +-
 .../audio/wavenet/wavenet_vocoder/modules.py | 2 +-
 .../src/FaceAttribute/resnet18.py | 4 +-
 .../src/FaceAttribute/resnet18_softmax.py | 4 +-
 .../cv/FaceQualityAssessment/src/face_qa.py | 6 +-
 .../cv/FaceRecognition/src/backbone/resnet.py | 4 +-
 .../cv/FaceRecognitionForTracking/src/reid.py | 4 +-
 .../src/reid_for_export.py | 4 +-
 model_zoo/research/cv/centernet/src/decode.py | 2 +-
 .../research/cv/ghostnet/src/ghostnet.py | 2 +-
 .../research/cv/ghostnet/src/ghostnet600.py | 2 +-
 .../cv/ghostnet_quant/src/ghostnet.py | 2 +-
 .../resnet50_adv_pruning/src/resnet_imgnet.py | 2 +-
 .../research/cv/squeezenet/src/squeezenet.py | 2 +-
 .../cv/ssd_ghostnet/src/ssd_ghostnet.py | 4 +-
 .../nlp/ternarybert/src/tinybert_model.py | 4 +-
 .../apps/bert_attention_submodules.py | 4 +-
 .../pipeline/forward/compare_forward.py | 12 +--
 .../pipeline/forward/compile_forward.py | 8 +-
 .../pipeline/gradient/compare_gradient.py | 12 +--
 .../pipeline/gradient/compile_gradient.py | 8 +-
 tests/perf_test/resnet_example.py | 2 +-
 tests/st/auto_parallel/optimizer_parallel.py | 4 +-
 .../auto_parallel/parallel_strategy_search.py | 2 +-
 .../st/auto_parallel/resnet50_expand_loss.py | 4 +-
 tests/st/control/test_cont_grad.py | 48 +++++-----
 tests/st/control/test_if_by_if.py | 4 +-
 tests/st/cpp/model/test_tensor_add.cc | 8 +-
 tests/st/dump/test_data_dump.py | 2 +-
 tests/st/export/text_air.py | 6 +-
 .../st/fusion/test_add_relu_buffer_fusion.py | 2 +-
 tests/st/fusion/test_conv_bn1_fusion.py | 2 +-
 .../test_tbe_multi_inout_eltwise_fusion.py | 2 +-
 tests/st/graph_kernel/model/test_split.py | 20 ++--
 tests/st/mem_reuse/resnet.py | 4 +-
 tests/st/model_zoo_tests/yolov3/src/yolov3.py | 2 +-
 .../yolov3_darknet53/src/darknet.py | 2 +-
 .../st/networks/models/bert/src/bert_model.py | 4 +-
 .../deeplabv3/src/backbone/resnet_deeplab.py | 6 +-
 .../st/networks/models/resnet50/src/resnet.py | 2 +-
 .../models/resnet50/src_thor/resnet.py | 2 +-
 .../models/resnet50/src_thor/thor_layer.py | 2 +-
 tests/st/networks/models/resnetv1_5.py | 4 +-
 tests/st/networks/test_gpu_resnet.py | 6 +-
 tests/st/ops/ascend/test_add.py | 2 +-
 .../st/ops/ascend/test_maxpool_with_argmax.py | 2 +-
 tests/st/ops/ascend/test_tbe_ops/test_add.py | 2 +-
 tests/st/ops/cpu/test_tensoradd.py | 2 +-
 tests/st/ops/gpu/test_cudnn_inplace_fusion.py | 2 +-
 tests/st/ops/gpu/test_relu_v2.py | 4 +-
 tests/st/ops/gpu/test_tensoradd.py | 4 +-
 tests/st/ops/graph_kernel/test_atomic_add.py | 4 +-
 tests/st/ops/graph_kernel/test_cse.py | 2 +-
 tests/st/ops/graph_kernel/test_fuse.py | 2 +-
 tests/st/ops/graph_kernel/test_simplify.py | 2 +-
 tests/st/ps/multi_full_ps/resnet.py | 2 +-
 .../st/pynative/loss_scale/test_loss_scale.py | 2 +-
 .../pynative/parser/test_parser_construct.py | 2 +-
 .../st/pynative/test_graph_param_transform.py | 2 +-
 .../test_pynative_mixed_precision_cells.py | 2 +-
 .../pynative/test_pynative_resnet50_ascend.py | 4 +-
 .../st/pynative/test_pynative_resnet50_gpu.py | 4 +-
 .../mobilenetv2_quant/mobilenetV2.py | 2 +-
 tests/st/tbe_networks/resnet.py | 4 +-
 .../parallel/ops_info/tensor_add_info_test.cc | 10 +-
 tests/ut/cpp/parallel/step_parallel_test.cc | 2 +-
 .../pre_activate/mem_reuse/mem_reuse_test.cc | 4 +-
 .../gtest_input/mem_reuse/mem_reuse_test.py | 2 +-
 .../gtest_input/optimizer/opt_test.py | 2 +-
 .../adam_apply_one_fusion_test.py | 2 +-
 .../adam_apply_one_with_decay_rule.py | 2 +-
 .../pre_activate/buffer_fusion_test.py | 2 +-
 ...nvert_tuple_input_to_dynamic_input_test.py | 2 +-
 .../convert_tuple_output_to_maketuple_test.py | 2 +-
 .../eliminate_redundant_op_test.py | 2 +-
 .../getnext_memcpy_elimination_test.py | 2 +-
 .../gtest_input/pre_activate/hw_opt_test.py | 2 +-
 .../pre_activate/insert_trans_op_test.py | 2 +-
 .../pre_activate/ir_fusion_test.py | 2 +-
 .../pre_activate/lamb_next_mv_rule_test.py | 2 +-
 .../lamb_next_mv_with_decay_rule_test.py | 2 +-
 .../lamb_next_mv_with_decay_v1_rule.py | 2 +-
 .../pre_activate/lamb_next_right_rule_test.py | 2 +-
 ...er_norm_beta_gamma_backprop_fusion_test.py | 2 +-
 .../pre_activate/mixed_precision_test.py | 2 +-
 .../pre_activate/mul_add_fusion_test.py | 2 +-
 .../pre_activate/optimize_dependence_test.py | 2 +-
 .../remove_internal_output_test.py | 2 +-
 .../pre_activate/transdata_split_test.py | 2 +-
 .../transpose_transdata_fusion_test.py | 2 +-
 .../gtest_input/session/session_test.py | 2 +-
 .../cpp/session/anf_runtime_algorithm_test.cc | 96 +++++++++----------
 tests/ut/cpp/session/kernel_graph_test.cc | 10 +-
 tests/ut/cpp/session/session_basic_test.cc | 4 +-
 tests/ut/cpp/transform/convert_test.cc | 2 +-
 .../test_data_parallel_resnet.py | 4 +-
 tests/ut/python/exec/resnet_example.py | 2 +-
 tests/ut/python/exec/test_tensor_add.py | 6 +-
 tests/ut/python/ir/test_row_tensor.py | 2 +-
 tests/ut/python/keep_order/test_keep_order.py | 4 +-
 tests/ut/python/model/res18_example.py | 4 +-
 tests/ut/python/model/resnet.py | 2 +-
 tests/ut/python/model/test_mix_precision.py | 6 +-
 tests/ut/python/ops/test_array_ops.py | 2 +-
 tests/ut/python/ops/test_bprop_disorder.py | 4 +-
 tests/ut/python/ops/test_control_ops.py | 10 +-
 tests/ut/python/ops/test_dynamic_shape.py | 4 +-
 tests/ut/python/ops/test_math_ops_check.py | 4 +-
 tests/ut/python/ops/test_nn_ops.py | 2 +-
 tests/ut/python/ops/test_ops.py | 18 ++--
 .../python/optimizer/test_debug_location.py | 4 +-
 .../test_optimizer_with_loss_scale.py | 4 +-
 .../parallel/test_add_relu_redistribution.py | 2 +-
 tests/ut/python/parallel/test_arithmetic.py | 6 +-
 .../parallel/test_auto_parallel_flag.py | 4 +-
 .../parallel/test_auto_parallel_for_loop.py | 2 +-
 .../test_auto_parallel_for_loop_reshape.py | 2 +-
 .../parallel/test_auto_parallel_resnet.py | 2 +-
 .../parallel/test_auto_parallel_rhombus.py | 18 ++--
 ...est_auto_parallel_star_partial_strategy.py | 2 +-
 .../test_auto_parallel_transformer.py | 2 +-
 .../test_auto_parallel_triangle_overwrite.py | 2 +-
 .../parallel/test_auto_parallel_two_bn.py | 2 +-
 .../parallel/test_batch_parallel_tensoradd.py | 2 +-
 tests/ut/python/parallel/test_linear.py | 2 +-
 .../ut/python/parallel/test_matmul_tensor.py | 2 +-
 tests/ut/python/parallel/test_mul_div_bn.py | 2 +-
 tests/ut/python/parallel/test_one_hot_net.py | 6 +-
 .../parallel/test_operator_model_parallel.py | 8 +-
 .../parallel/test_reduce_method_info.py | 2 +-
 .../ut/python/parallel/test_repeated_calc.py | 2 +-
 tests/ut/python/parallel/test_reshape.py | 2 +-
 .../parameter_feature/test_parameter.py | 4 +-
 .../pipeline/infer/test_scalar_add_grad.py | 4 +-
 .../python/pipeline/parse/test_create_obj.py | 4 +-
 .../pipeline/parse/test_structure_output.py | 8 +-
 .../pynative_mode/ge/ops/test_tensor_add.py | 4 +-
 .../python/pynative_mode/ops/test_hypermap.py | 2 +-
 .../pynative_mode/ops/test_multitype.py | 2 +-
 tests/ut/python/pynative_mode/test_backend.py | 2 +-
 .../python/pynative_mode/test_cont_cases.py | 40 ++++----
 .../pynative_mode/test_implicit_conversion.py | 6 +-
 tests/ut/python/pynative_mode/test_staging.py | 4 +-
 .../pynative_mode/test_tuple_parameter.py | 4 +-
 tests/ut/python/train/summary/test_summary.py | 2 +-
 .../train/summary/test_summary_collector.py | 6 +-
 .../test_summary_ops_params_valid_check.py | 2 +-
 .../train/summary/test_tensor_summary.py | 2 +-
 tests/ut/python/transform/test_transform.py | 2 +-
 tests/ut/python/utils/test_initializer.py | 2 +-
 tests/ut/python/utils/test_serialize.py | 2 +-
 tests/vm_impl/math_ops_vm_impl.py | 2 +-
 260 files changed, 609 insertions(+), 607 deletions(-)

diff --git a/config/op_info.config b/config/op_info.config
index bea3ecf006..78d5157216 100644
--- a/config/op_info.config
+++ b/config/op_info.config
@@ -56,7 +56,7 @@
 {"op_name": "TransData", "inputs": [{"index": 0, "name": "src", "param_type": "required"}], "outputs": [{"index": 0, "name": "dst", "param_type": "required"}], "attr": [{"name": "src_format", "type": "str"}, {"name": "dst_format", "type": "str"}], "fusion_type": "OPAQUE", "dtype_format": [[["uint16", "DefaultFormat"], ["uint16", "NC1HWC0"]], [["uint16", "NC1HWC0"], ["uint16", "DefaultFormat"]]], "imply_type": "AiCPU"}
 {"op_name": "CTCGreedyDecoder", "inputs": [{"index": 0, "name": "inputs", "param_type": "required"}, {"index": 1, "name": "sequence_length", "param_type": "required"}], "outputs": [{"index": 0, "name": "decoded_indices", "param_type": "required"}, {"index": 1, "name": "decoded_values", "param_type": "required"}, {"index": 2, "name": "decoded_shape", "param_type": "required"}, {"index": 3, "name": "log_probability", "param_type": "required"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float64", "DefaultFormat"], ["int32", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["int64", "DefaultFormat"], ["float64", "DefaultFormat"]]], "imply_type": "AiCPU"}
 {"op_name": "Abs", "inputs": [{"index": 0, "name": "x"}], "outputs": [{"index": 0, "name": "output"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
-{"op_name": "TensorAdd", "inputs": [{"index": 0, "name": "x"}, {"index": 1, "name": "y"}], "outputs": [{"index": 0, "name": "output"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "FracZ"], ["float16", "FracZ"], ["float16", "FracZ"]], [["float32", "FracZ"], ["float32", "FracZ"], ["float32", "FracZ"]], [["int32", "FracZ"], ["int32", "FracZ"], ["int32", "FracZ"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]], [["int32", "FRACTAL_NZ"], ["int32", "FRACTAL_NZ"], ["int32", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
+{"op_name": "Add", "inputs": [{"index": 0, "name": "x"}, {"index": 1, "name": "y"}], "outputs": [{"index": 0, "name": "output"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "FracZ"], ["float16", "FracZ"], ["float16", "FracZ"]], [["float32", "FracZ"], ["float32", "FracZ"], ["float32", "FracZ"]], [["int32", "FracZ"], ["int32", "FracZ"], ["int32", "FracZ"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]], [["int32", "FRACTAL_NZ"], ["int32", "FRACTAL_NZ"], ["int32", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
 {"op_name": "AddN", "inputs": [{"index": 0, "name": "inputs", "param_type": "dynamic"}], "outputs": [{"index": 0, "name": "output"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float16", "FracZ"], ["float16", "FracZ"]], [["float32", "FracZ"], ["float32", "FracZ"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["float32", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
 {"op_name": "BatchMatMul", "inputs": [{"index": 0, "name": "x1"}, {"index": 1, "name": "x2"}], "outputs": [{"index": 0, "name": "output"}], "attr": [{"name": "transpose_a", "param_type": "optional", "type": "bool"}, {"name": "transpose_b", "param_type": "optional", "type": "bool"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
 {"op_name": "Cast", "inputs": [{"index": 0, "name": "x"}], "outputs": [{"index": 0, "name": "output"}], "attr": [{"name": "dst_type", "param_type": "required", "type": "str"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float16", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["int32", "DefaultFormat"], ["float16", "DefaultFormat"]], [["int32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["bool", "DefaultFormat"], ["float16", "DefaultFormat"]], [["bool", "DefaultFormat"], ["float32", "DefaultFormat"]], [["bool", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float16", "NC1HWC0"]], [["bool", "NC1HWC0"], ["int32", "NC1HWC0"]], [["bool", "NC1HWC0"], ["float32", "NC1HWC0"]], [["float16", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]], [["float32", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"]], [["bool", "FRACTAL_NZ"], ["int32", "FRACTAL_NZ"]], [["bool", "FRACTAL_NZ"], ["float32", "FRACTAL_NZ"]]], "imply_type": "AKG", "processor": "AiCore"}
@@ -172,8 +172,8 @@
 {"op_name": "SoftmaxCrossEntropyWithLogits", "inputs": [{"index": 0, "name": "input_features", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "input_labels", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "output_loss", "need_compile": true, "param_type": "required", "shape": "all"}, {"index": 1, "name": "output_backprop", "need_compile": true, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "softmax_cross_entropy_with_logits.so", "compute_cost": 10, "kernel_name": "softmax_cross_entropy_with_logits", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": ""}
 {"op_name": "SigmoidCrossEntropyWithLogits", "inputs": [{"index": 0, "name": "predict", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "target", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "loss", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "sigmoid_cross_entropy_with_logits.so", "compute_cost": 10, "kernel_name": "sigmoid_cross_entropy_with_logits", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": ""}
 {"op_name": "SigmoidCrossEntropyWithLogitsGrad", "inputs": [{"index": 0, "name": "predict", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "target", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "dout", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "gradient", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "sigmoid_cross_entropy_with_logits_grad.so", "compute_cost": 10, "kernel_name": "sigmoid_cross_entropy_with_logits_grad", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": ""}
-{"op_name": "TensorAdd", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "add.so", "compute_cost": 10, "kernel_name": "add", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": "dynamicFormat"}
-{"op_name": "TensorAdd", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "add.so", "compute_cost": 10, "kernel_name": "add", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": true, "need_check_supported": false, "op_pattern": "dynamicFormat"}
+{"op_name": "Add", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "add.so", "compute_cost": 10, "kernel_name": "add", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": "dynamicFormat"}
+{"op_name": "Add", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [], "fusion_type": "ELEMWISE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "NC1HWC0"], ["int32", "NC1HWC0"], ["int32", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NC1HWC0"], ["float16", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NC1HWC0"], ["float32", "NC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "add.so", "compute_cost": 10, "kernel_name": "add", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": true, "need_check_supported": false, "op_pattern": "dynamicFormat"}
 {"op_name": "TransData", "inputs": [{"index": 0, "name": "src", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "dst", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "src_format", "param_type": "required", "type": "str", "value": "DefaultFormat, NC1HWC0, FracZ, FRACTAL_NZ, HWCN, C1HWNCoC0, NDHWC, NHWC"}, {"name": "dst_format", "param_type": "required", "type": "str", "value": "DefaultFormat, NC1HWC0, FracZ, FRACTAL_NZ, HWCN, C1HWNCoC0, NDHWC, NHWC"}], "fusion_type": "OPAQUE", "dtype_format": [[["float32", "NHWC"], ["float32", "NC1HWC0"]], [["float32", "DefaultFormat"], ["float32", "NC1HWC0"]], [["float32", "NC1HWC0"], ["float32", "NHWC"]], [["float32", "NC1HWC0"], ["float32", "DefaultFormat"]], [["float32", "FracZ"], ["float32", "DefaultFormat"]], [["float32", "DefaultFormat"], ["float32", "FracZ"]], [["float32", "HWCN"], ["float32", "FracZ"]], [["float32", "FracZ"], ["float32", "HWCN"]], [["float32", "C1HWNCoC0"], ["float32", "HWCN"]], [["float32", "HWCN"], ["float32", "C1HWNCoC0"]], [["float16", "DefaultFormat"], ["float16", "FracZ"]], [["float16", "NHWC"], ["float16", "FracZ"]], [["float16", "HWCN"], ["float16", "FracZ"]], [["float16", "DefaultFormat"], ["float16", "NC1HWC0"]], [["float16", "NHWC"], ["float16", "NC1HWC0"]], [["float16", "HWCN"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NHWC"]], [["float16", "NC1HWC0"], ["float16", "DefaultFormat"]], [["float16", "FracZ"], ["float16", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "FracZ"]], [["float16", "HWCN"], ["float16", "FracZ"]], [["float16", "FracZ"], ["float16", "HWCN"]], [["float16", "C1HWNCoC0"], ["float16", "HWCN"]], [["float16", "HWCN"], ["float16", "C1HWNCoC0"]], [["float16", "DefaultFormat"], ["float16", "FRACTAL_NZ"]], [["float32", "DefaultFormat"], ["float32", "FRACTAL_NZ"]], [["float16", "FRACTAL_NZ"], ["float16", "DefaultFormat"]], [["float32", "FRACTAL_NZ"], ["float32", "DefaultFormat"]], [["bool", "NHWC"], ["bool", "NC1HWC0"]], [["bool", "DefaultFormat"], ["bool", "NC1HWC0"]], [["bool", "NC1HWC0"], ["bool", "NHWC"]], [["bool", "NC1HWC0"], ["bool", "DefaultFormat"]], [["float16", "DefaultFormat"], ["float16", "NHWC"]], [["float16", "DefaultFormat"], ["float16", "HWCN"]], [["float16", "NHWC"], ["float16", "DefaultFormat"]], [["float16", "NHWC"], ["float16", "HWCN"]], [["float16", "HWCN"], ["float16", "DefaultFormat"]], [["float16", "HWCN"], ["float16", "NHWC"]], [["float32", "DefaultFormat"], ["float32", "NHWC"]], [["float32", "DefaultFormat"], ["float32", "HWCN"]], [["float32", "NHWC"], ["float32", "DefaultFormat"]], [["float32", "NHWC"], ["float32", "HWCN"]], [["float32", "HWCN"], ["float32", "DefaultFormat"]], [["float32", "HWCN"], ["float32", "NHWC"]], [["int8", "DefaultFormat"], ["int8", "FRACTAL_NZ"]], [["int8", "DefaultFormat"], ["int8", "FracZ"]], [["int8", "DefaultFormat"], ["int8", "NHWC"]], [["int8", "DefaultFormat"], ["int8", "HWCN"]], [["int8", "NHWC"], ["int8", "DefaultFormat"]], [["int8", "NHWC"], ["int8", "HWCN"]], [["int8", "HWCN"], ["int8", "DefaultFormat"]], [["int8", "HWCN"], ["int8", "NHWC"]], [["int16", "DefaultFormat"], ["int16", "NHWC"]], [["int16", "DefaultFormat"], ["int16", "HWCN"]], [["int16", "NHWC"], ["int16", "DefaultFormat"]], [["int16", "NHWC"], ["int16", "HWCN"]], [["int16", "HWCN"], ["int16", "DefaultFormat"]], [["int16", "HWCN"], ["int16", "NHWC"]], [["int32", "DefaultFormat"], ["int32", "NHWC"]], [["int32", "DefaultFormat"], ["int32", "HWCN"]], [["int32", "NHWC"], ["int32", "DefaultFormat"]], [["int32", "NHWC"], ["int32", "HWCN"]], [["int32", "HWCN"], ["int32", "DefaultFormat"]], [["int32", "HWCN"], ["int32", "NHWC"]], [["int64", "DefaultFormat"], ["int64", "NHWC"]], [["int64", "DefaultFormat"], ["int64", "HWCN"]], [["int64", "NHWC"], ["int64", "DefaultFormat"]], [["int64", "NHWC"], ["int64", "HWCN"]], [["int64", "HWCN"], ["int64", "DefaultFormat"]], [["int64", "HWCN"], ["int64", "NHWC"]], [["uint8", "DefaultFormat"], ["uint8", "NHWC"]], [["uint8", "DefaultFormat"], ["uint8", "HWCN"]], [["uint8", "NHWC"], ["uint8", "DefaultFormat"]], [["uint8", "NHWC"], ["uint8", "HWCN"]], [["uint8", "HWCN"], ["uint8", "DefaultFormat"]], [["uint8", "HWCN"], ["uint8", "NHWC"]], [["uint16", "DefaultFormat"], ["uint16", "NHWC"]], [["uint16", "DefaultFormat"], ["uint16", "HWCN"]], [["uint16", "NHWC"], ["uint16", "DefaultFormat"]], [["uint16", "NHWC"], ["uint16", "HWCN"]], [["uint16", "HWCN"], ["uint16", "DefaultFormat"]], [["uint16", "HWCN"], ["uint16", "NHWC"]], [["uint32", "DefaultFormat"], ["uint32", "NHWC"]], [["uint32", "DefaultFormat"], ["uint32", "HWCN"]], [["uint32", "NHWC"], ["uint32", "DefaultFormat"]], [["uint32", "NHWC"], ["uint32", "HWCN"]], [["uint32", "HWCN"], ["uint32", "DefaultFormat"]], [["uint32", "HWCN"], ["uint32", "NHWC"]], [["uint64", "DefaultFormat"], ["uint64", "NHWC"]], [["uint64", "DefaultFormat"], ["uint64", "HWCN"]], [["uint64", "NHWC"], ["uint64", "DefaultFormat"]], [["uint64", "NHWC"], ["uint64", "HWCN"]], [["uint64", "HWCN"], ["uint64", "DefaultFormat"]], [["uint64", "HWCN"], ["uint64", "NHWC"]], [["int32", "FRACTAL_NZ"], ["int32", "DefaultFormat"]], [["float16", "NDHWC"], ["float16", "NC1HWC0"]], [["float16", "NC1HWC0"], ["float16", "NDHWC"]], [["int8", "HWCN"], ["int8", "C1HWNCoC0"]], [["float16", "HWCN"], ["float16", "FracZ"]], [["float16", "FracZ"], ["float16", "HWCN"]], [["float16", "HWCN"], ["float16", "FRACTAL_NZ"]], [["float32", "HWCN"], ["float16", "FRACTAL_NZ"]], [["float16", "HWCN"], ["float16", "FRACTAL_ZN_LSTM"]], [["float32", "HWCN"], ["float32", "FRACTAL_ZN_LSTM"]], [["float16", "FRACTAL_ZN_LSTM"], ["float16", "HWCN"]], [["float32", "FRACTAL_ZN_LSTM"], ["float32", "HWCN"]], [["float16", "NDHWC"], ["float16", "NDC1HWC0"]], [["float16", "NDC1HWC0"], ["float16", "NDHWC"]], [["float16", "DHWCN"], ["float16", "FRACTAL_Z_3D"]], [["float16", "FRACTAL_Z_3D"], ["float16", "DHWCN"]], [["float16", "NCDHW"], ["float16", "NDC1HWC0"]], [["float16", "NDC1HWC0"], ["float16", "NCDHW"]], [["float16", "NCDHW"], ["float16", "FRACTAL_Z_3D"]], [["float32", "NCDHW"], ["float32", "FRACTAL_Z_3D"]], [["float16", "FRACTAL_Z_3D"], ["float16", "NCDHW"]], [["float32", "FRACTAL_Z_3D"], ["float32", "NCDHW"]], [["float16", "NDHWC"], ["float16", "FRACTAL_Z_3D"]], [["float32", "NDHWC"], ["float32", "FRACTAL_Z_3D"]], [["float16", "FRACTAL_Z_3D"], ["float16", "NDHWC"]], [["float32", "FRACTAL_Z_3D"], ["float32", "NDHWC"]], [["float32", "DHWCN"], ["float32", "FRACTAL_Z_3D"]], [["float32", "FRACTAL_Z_3D"], ["float32", "DHWCN"]], [["float32", "NDC1HWC0"], ["float32", "NDHWC"]], [["float32", "NDHWC"], ["float32", "NDC1HWC0"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "trans_data.so", "compute_cost": 10, "kernel_name": "trans_data", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": false, "op_pattern": ""}
 {"op_name": "TopK", "inputs": [{"index": 0, "name": "x", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "assist_seq", "need_compile": false, "param_type": "required", "shape": "all"}], "outputs": [{"index": 0, "name": "values", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "indices", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "dim", "param_type": "optional", "type": "int", "value": "all"}, {"name": "k", "param_type": "required", "type": "int", "value": "all"}, {"name": "largest", "param_type": "optional", "type": "bool", "value": "all"}, {"name": "sorted", "param_type": "optional", "type": "bool", "value": "true"}], "fusion_type": "OPAQUE", "dtype_format": [[["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["float16", "DefaultFormat"], ["int32", "DefaultFormat"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "top_k_d.so", "compute_cost": 10, "kernel_name": "top_k_d", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": true, "op_pattern": ""}
 {"op_name": "MatMul", "inputs": [{"index": 0, "name": "x1", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 1, "name": "x2", "need_compile": false, "param_type": "required", "shape": "all"}, {"index": 2, "name": "bias", "need_compile": false, "param_type": "optional", "shape": "all"}, {"index": 3, "name": "offset_w", "need_compile": false, "param_type": "optional", "shape": "all"}], "outputs": [{"index": 0, "name": "y", "need_compile": false, "param_type": "required", "shape": "all"}], "attr": [{"name": "transpose_x1", "param_type": "required", "type": "bool", "value": "all"}, {"name": "transpose_x2", "param_type": "required", "type": "bool", "value": "all"}, {"name": "offset_x", "param_type": "optional", "type": "int", "value": "all"}], "fusion_type": "OPAQUE", "dtype_format": [[["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int32", "DefaultFormat"], ["int8", "DefaultFormat"], ["int32", "DefaultFormat"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float16", "DefaultFormat"], ["int8", "DefaultFormat"], ["float16", "FRACTAL_NZ"]], [["float16", "FRACTAL_NZ"], ["float16", "FRACTAL_NZ"], ["float32", "DefaultFormat"], ["int8", "DefaultFormat"], ["float32", "FRACTAL_NZ"]], [["float32", "NHWC"], ["float32", "NHWC"], ["float32", "NHWC"], ["int8", "DefaultFormat"], ["float32", "NHWC"]], [["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["float32", "DefaultFormat"], ["int8", "DefaultFormat"], ["float32", "DefaultFormat"]], [["int32", "NHWC"], ["int32", "NHWC"], ["int32", "NHWC"], ["int8", "DefaultFormat"], ["int32", "NHWC"]]], "imply_type": "TBE", "async_flag": false, "binfile_name": "mat_mul.so", "compute_cost": 10, "kernel_name": "mat_mul", "partial_flag": true, "reshape_type": "", "dynamic_format": false, "dynamic_shape": false, "need_check_supported": true, "op_pattern": ""}
diff --git a/mindspore/_extends/graph_kernel/expanders/bias_add.py b/mindspore/_extends/graph_kernel/expanders/bias_add.py
index a775f8f25b..8f51168eee 100644
--- a/mindspore/_extends/graph_kernel/expanders/bias_add.py
+++ b/mindspore/_extends/graph_kernel/expanders/bias_add.py
@@ -36,24 +36,24 @@ def expand_biasadd(expand_info):
                 'ExpandDims', [input_y], attrs={'axis': 1})
             input_y_expand = graph_builder.emit(
                 'ExpandDims', [input_y_expand], attrs={'axis': 2})
-            result = graph_builder.emit('TensorAdd', [input_x, input_y_expand])
+            result = graph_builder.emit('Add', [input_x, input_y_expand])
         elif input_x.data_format == "DefaultFormat":
             if len(input_x.shape) == 2:
-                result = graph_builder.emit('TensorAdd', [input_x, input_y])
+                result = graph_builder.emit('Add', [input_x, input_y])
             elif len(input_x.shape) == 3:
                 input_y_expand = graph_builder.emit(
                     'ExpandDims', [input_y], attrs={'axis': 1})
                 result = graph_builder.emit(
-                    'TensorAdd', [input_x, input_y_expand])
+                    'Add', [input_x, input_y_expand])
             else:
                 input_y_expand = graph_builder.emit(
                     'ExpandDims', [input_y], attrs={'axis': 1})
                 input_y_expand = graph_builder.emit(
                     'ExpandDims', [input_y_expand], attrs={'axis': 2})
                 result = graph_builder.emit(
-                    'TensorAdd', [input_x, input_y_expand])
+                    'Add', [input_x, input_y_expand])
         else:
-            result = graph_builder.emit('TensorAdd', [input_x, input_y])
+            result = graph_builder.emit('Add', [input_x, input_y])
 
         # set graph output.
         graph_scope.set_output(result)
diff --git a/mindspore/_extends/graph_kernel/expanders/fused_adam.py b/mindspore/_extends/graph_kernel/expanders/fused_adam.py
index e8db66c71f..e2ef30abaf 100644
--- a/mindspore/_extends/graph_kernel/expanders/fused_adam.py
+++ b/mindspore/_extends/graph_kernel/expanders/fused_adam.py
@@ -49,13 +49,13 @@
         # compute result
         beta_1_mul_m = graph_builder.emit('Mul', [beta_1, m])
         one_sub_beta_1_mul_grad = graph_builder.emit('Mul', [one_sub_beta_1, gradient])
-        next_m = graph_builder.emit('TensorAdd', [beta_1_mul_m, one_sub_beta_1_mul_grad])
+        next_m = graph_builder.emit('Add', [beta_1_mul_m, one_sub_beta_1_mul_grad])
         beta_2_mul_v = graph_builder.emit('Mul', [beta_2, v])
         grad_square = graph_builder.emit('Mul', [gradient, gradient])
         one_sub_beta_2_mul_grad_square = graph_builder.emit('Mul', [one_sub_beta_2, grad_square])
-        next_v = graph_builder.emit('TensorAdd', [beta_2_mul_v, one_sub_beta_2_mul_grad_square])
+        next_v = graph_builder.emit('Add', [beta_2_mul_v, one_sub_beta_2_mul_grad_square])
         sqrt_next_v = graph_builder.emit('Sqrt', [next_v])
-        sqrt_next_v_add_eps = graph_builder.emit('TensorAdd', [sqrt_next_v, eps])
+        sqrt_next_v_add_eps = graph_builder.emit('Add', [sqrt_next_v, eps])
         update = graph_builder.emit('RealDiv', [next_m, sqrt_next_v_add_eps])
         update_with_lr = graph_builder.emit('Mul', [lr, update])
         next_para = graph_builder.emit('Sub', [param, update_with_lr])
diff --git a/mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py b/mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py
index 772cadd0d5..3a8c811531 100644
--- a/mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py
+++ b/mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py
@@ -52,16 +52,16 @@
         # compute result
         beta_1_mul_m = graph_builder.emit('Mul', [beta_1, m])
         one_sub_beta_1_mul_grad = graph_builder.emit('Mul', [one_sub_beta_1, gradient])
-        next_m = graph_builder.emit('TensorAdd', [beta_1_mul_m, one_sub_beta_1_mul_grad])
+        next_m = graph_builder.emit('Add', [beta_1_mul_m, one_sub_beta_1_mul_grad])
        beta_2_mul_v = graph_builder.emit('Mul', [beta_2, v])
         grad_square = graph_builder.emit('Mul', [gradient, gradient])
         one_sub_beta_2_mul_grad_square = graph_builder.emit('Mul', [one_sub_beta_2, grad_square])
-        next_v = graph_builder.emit('TensorAdd', [beta_2_mul_v, one_sub_beta_2_mul_grad_square])
+        next_v = graph_builder.emit('Add', [beta_2_mul_v, one_sub_beta_2_mul_grad_square])
         sqrt_next_v = graph_builder.emit('Sqrt', [next_v])
-        sqrt_next_v_add_eps = graph_builder.emit('TensorAdd', [sqrt_next_v, eps])
+        sqrt_next_v_add_eps = graph_builder.emit('Add', [sqrt_next_v, eps])
         update = graph_builder.emit('RealDiv', [next_m, sqrt_next_v_add_eps])
         param_with_weight_decay = graph_builder.emit('Mul', [weight_decay, param])
-        update = graph_builder.emit('TensorAdd', [update, param_with_weight_decay])
+        update = graph_builder.emit('Add', [update, param_with_weight_decay])
         update_with_lr = graph_builder.emit('Mul', [lr, update])
         next_para = graph_builder.emit('Sub', [param, update_with_lr])
 
diff --git a/mindspore/_extends/graph_kernel/expanders/gelu.py b/mindspore/_extends/graph_kernel/expanders/gelu.py
index 86be18aed9..dc81b3c6cd 100644
--- a/mindspore/_extends/graph_kernel/expanders/gelu.py
+++ b/mindspore/_extends/graph_kernel/expanders/gelu.py
@@ -42,7 +42,7 @@ def expand_gelu(expand_info):
         pow_0 = graph_builder.emit('Mul', [mul_0, input_x])
         const_csvalue = graph_builder.value(pow_0.dtype, CSVALUE, input_desc['format'])
         mul_1 = graph_builder.emit('Mul', [pow_0, const_csvalue])
-        tanh_res = graph_builder.emit('TensorAdd', [input_x, mul_1])
+        tanh_res = graph_builder.emit('Add', [input_x, mul_1])
         const_csvalue_sqrt_two_div_pi = graph_builder.value(
             tanh_res.dtype, CSVALUE_SQRT_TWO_DIV_PI, input_desc['format'])
         y = graph_builder.emit('Mul', [tanh_res, const_csvalue_sqrt_two_div_pi])
@@ -51,7 +51,7 @@
         tanh_y = graph_builder.emit('Tanh', [y])
         const_one = graph_builder.value(tanh_y.dtype, ONE, input_desc['format'])
         const_half = graph_builder.value(tanh_y.dtype, HALF, input_desc['format'])
-        tanh_y_add_one = graph_builder.emit('TensorAdd', [tanh_y, const_one])
+        tanh_y_add_one = graph_builder.emit('Add', [tanh_y, const_one])
         mul_x = graph_builder.emit('Mul', [input_x, tanh_y_add_one])
         result = graph_builder.emit('Mul', [const_half, mul_x])
 
diff --git a/mindspore/_extends/graph_kernel/expanders/gelu_grad.py b/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
index 5e0647634f..41c1642496 100644
--- a/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/gelu_grad.py
@@ -55,18 +55,18 @@ def expand_gelugrad(expand_info):
         # cal mul_right
         mul_double = graph_builder.emit('Mul', [input_x, input_x])
         mul_double_mul_tri = graph_builder.emit('Mul', [const_csvalue_tri, mul_double])
-        mul_add_one = graph_builder.emit('TensorAdd', [const_one, mul_double_mul_tri])
+        mul_add_one = graph_builder.emit('Add', [const_one, mul_double_mul_tri])
         mul_right = graph_builder.emit('Mul', [const_csvalue_sqrt_two_div_pi, mul_add_one])
 
         # cal tanh_para
         mul_triple = graph_builder.emit('Mul', [input_x, mul_double])
         mul_triple_mul_csvalue = graph_builder.emit('Mul', [const_csvalue, mul_triple])
-        mul_add_x = graph_builder.emit('TensorAdd', [input_x, mul_triple_mul_csvalue])
+        mul_add_x = graph_builder.emit('Add', [input_x, mul_triple_mul_csvalue])
         tanh_para = graph_builder.emit('Mul', [const_csvalue_sqrt_two_div_pi, mul_add_x])
 
         # cal 0.5 * (1.0 + tanh(tahn_para))
         tanh_res = graph_builder.emit('Tanh', [tanh_para])
-        tanh_res_add_one = graph_builder.emit('TensorAdd', [const_one, tanh_res])
+        tanh_res_add_one = graph_builder.emit('Add', [const_one, tanh_res])
         half_mul_tanh_res_add_one = graph_builder.emit('Mul', [const_half, tanh_res_add_one])
 
         # cal 0.5 * x * (1.0 - tanh(tanh_para) * tanh(tanh_para)) * mul_right
@@ -77,7 +77,7 @@
         mul_final = graph_builder.emit('Mul', [mul_tmp, mul_right])
 
         # cal result
-        result_tmp = graph_builder.emit('TensorAdd', [half_mul_tanh_res_add_one, mul_final])
+        result_tmp = graph_builder.emit('Add', [half_mul_tanh_res_add_one, mul_final])
         result = graph_builder.emit('Mul', [input_dy, result_tmp])
 
         # set graph output.
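
Note: the gelu.py/gelu_grad.py expanders above assemble the tanh
approximation of GELU from elementwise primitives. A NumPy sketch of the
forward formula, assuming the expander constants CSVALUE = 0.044715 and
CSVALUE_SQRT_TWO_DIV_PI = sqrt(2/pi):

    import numpy as np

    def gelu_tanh(x):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
        inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))
        return 0.5 * x * (1.0 + np.tanh(inner))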
diff --git a/mindspore/_extends/graph_kernel/expanders/layernorm.py b/mindspore/_extends/graph_kernel/expanders/layernorm.py
index 8f49486c01..9089a9f06b 100644
--- a/mindspore/_extends/graph_kernel/expanders/layernorm.py
+++ b/mindspore/_extends/graph_kernel/expanders/layernorm.py
@@ -68,13 +68,13 @@
         # Calculate normalize
         normalize_sub = graph_builder.emit('Sub', [input_x, mean])
         epsilon_v = graph_builder.value(input_x.dtype, epsilon, input_x.data_format)
-        normalize_add = graph_builder.emit('TensorAdd', [variance, epsilon_v])
+        normalize_add = graph_builder.emit('Add', [variance, epsilon_v])
         normlize_rsqrt = graph_builder.emit('Rsqrt', [normalize_add])
         normalize_mul = graph_builder.emit('Mul', [normalize_sub, normlize_rsqrt])
 
         # Calculate scale and translate
         scale_mul = graph_builder.emit('Mul', [input_gamma, normalize_mul])
-        res = graph_builder.emit('TensorAdd', [scale_mul, input_beta])
+        res = graph_builder.emit('Add', [scale_mul, input_beta])
 
         # set graph output.
         graph_scope.set_output(res, mean, variance)
diff --git a/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py b/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
index 9d73fa92d2..35eca7e7d8 100644
--- a/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
+++ b/mindspore/_extends/graph_kernel/expanders/layernorm_grad.py
@@ -66,7 +66,7 @@ def expand_layernormgrad(expand_info):
         mean_cof = graph_builder.value(x.dtype, (1.0 / reduce_size), x.data_format)
 
         # cal dg db
-        var_eps = graph_builder.emit('TensorAdd', [variance, eps])
+        var_eps = graph_builder.emit('Add', [variance, eps])
         sqrt_var_eps = graph_builder.emit('Sqrt', [var_eps])
         rsqrt_var_eps = graph_builder.emit('RealDiv', [const_one, sqrt_var_eps])
         x_sub_mean = graph_builder.emit('Sub', [x, mean])
@@ -100,10 +100,10 @@
         neg_rsqrt_var_eps_mul_sum_2 = graph_builder.emit('Mul', [neg_rsqrt_var_eps, sum_2])
         sum_1_mul_sum_3 = graph_builder.emit('Mul', [sum_1, sum_3])
         mean_cof_mul_sum_1_mul_sum_3 = graph_builder.emit('Mul', [mean_cof, sum_1_mul_sum_3])
-        add_tmp = graph_builder.emit('TensorAdd', [neg_rsqrt_var_eps_mul_sum_2, mean_cof_mul_sum_1_mul_sum_3])
+        add_tmp = graph_builder.emit('Add', [neg_rsqrt_var_eps_mul_sum_2, mean_cof_mul_sum_1_mul_sum_3])
         dx_3 = graph_builder.emit('Mul', [add_tmp, mean_cof])
-        dx_tmp = graph_builder.emit('TensorAdd', [dx_1, dx_2])
-        dx = graph_builder.emit('TensorAdd', [dx_tmp, dx_3])
+        dx_tmp = graph_builder.emit('Add', [dx_1, dx_2])
+        dx = graph_builder.emit('Add', [dx_tmp, dx_3])
 
         # set graph output.
         graph_scope.set_output(dx, dg, db)
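
Note: the layernorm.py expander above emits Sub, Add, Rsqrt and Mul in the
usual layer-normalization order. A NumPy sketch of the same computation
(the reduce axes and eps are parameters of the real expander; the values
here are illustrative):

    import numpy as np

    def layer_norm(x, gamma, beta, axis=-1, eps=1e-7):
        mean = x.mean(axis=axis, keepdims=True)
        variance = x.var(axis=axis, keepdims=True)
        normalized = (x - mean) * (1.0 / np.sqrt(variance + eps))  # the Rsqrt step
        return gamma * normalized + beta                           # scale and translate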
graph_scope.set_output(dx, dg, db) diff --git a/mindspore/_extends/graph_kernel/model/model.py b/mindspore/_extends/graph_kernel/model/model.py index 81822a2b12..f55391e9d7 100644 --- a/mindspore/_extends/graph_kernel/model/model.py +++ b/mindspore/_extends/graph_kernel/model/model.py @@ -131,7 +131,7 @@ class PrimLib: ] primtives = { - 'TensorAdd': Prim(ELEMWISE), + 'Add': Prim(ELEMWISE), 'Abs': Prim(ELEMWISE), 'Neg': Prim(ELEMWISE), 'Mul': Prim(ELEMWISE), diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_cpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_cpu_kernel.cc index 6b8397e032..a531095b33 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_cpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/arithmetic_cpu_kernel.cc @@ -238,7 +238,7 @@ void ArithmeticCPUKernel::LessEqual(const T *input1, const T *input2, bool *out, void ArithmeticCPUKernel::InitKernel(const CNodePtr &kernel_node) { MS_EXCEPTION_IF_NULL(kernel_node); std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node); - if (kernel_name == prim::kPrimTensorAdd->name()) { + if (kernel_name == prim::kPrimAdd->name()) { operate_type_ = ADD; } else if (kernel_name == prim::kPrimSub->name()) { operate_type_ = SUB; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/tensoradd_cpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/tensoradd_cpu_kernel.h index 0ede516da9..8c3730a2c7 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/tensoradd_cpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/tensoradd_cpu_kernel.h @@ -37,8 +37,7 @@ class TensorAddCPUKernel : public MKLCPUKernel { }; MS_REG_CPU_KERNEL( - TensorAdd, - KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), + Add, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), TensorAddCPUKernel); } // namespace kernel } // namespace mindspore diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.cc b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.cc index 271da4f6a4..1614b008f0 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.cc @@ -51,8 +51,7 @@ MS_REG_GPU_KERNEL_ONE( Sub, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), BroadcastOpGpuKernel, float) MS_REG_GPU_KERNEL_ONE( - TensorAdd, - KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), + Add, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), BroadcastOpGpuKernel, float) MS_REG_GPU_KERNEL_ONE( FloorDiv, @@ -103,8 +102,7 @@ MS_REG_GPU_KERNEL_ONE( Sub, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16), BroadcastOpGpuKernel, half) MS_REG_GPU_KERNEL_ONE( - TensorAdd, - KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16), + Add, KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16), BroadcastOpGpuKernel, half) MS_REG_GPU_KERNEL_ONE( FloorDiv, @@ -133,7 +131,7 @@ MS_REG_GPU_KERNEL_ONE( Equal, 
KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeBool), BroadcastOpGpuKernel, int) MS_REG_GPU_KERNEL_ONE( - TensorAdd, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32), + Add, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32), BroadcastOpGpuKernel, int) MS_REG_GPU_KERNEL_ONE( Minimum, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32), @@ -171,7 +169,7 @@ MS_REG_GPU_KERNEL_ONE( Equal, KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeBool), BroadcastOpGpuKernel, int64_t) MS_REG_GPU_KERNEL_ONE( - TensorAdd, KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64), + Add, KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64), BroadcastOpGpuKernel, int64_t) MS_REG_GPU_KERNEL_ONE( Minimum, KernelAttr().AddInputAttr(kNumberTypeInt64).AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeInt64), diff --git a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h index e9617220e0..538c7299f0 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h +++ b/mindspore/ccsrc/backend/kernel_compiler/gpu/math/broadcast_gpu_kernel.h @@ -145,7 +145,7 @@ class BroadcastOpGpuKernel : public GpuKernel { static std::map kBroadcastArithmetricTypeMap = { {"Maximum", BROADCAST_TYPE_MAXIMUM}, {"Minimum", BROADCAST_TYPE_MINIMUM}, {"Pow", BROADCAST_TYPE_POWER}, {"RealDiv", BROADCAST_TYPE_REALDIV}, {"Mul", BROADCAST_TYPE_MUL}, {"Sub", BROADCAST_TYPE_SUB}, - {"TensorAdd", BROADCAST_TYPE_ADD}, {"FloorDiv", BROADCAST_TYPE_FLOORDIV}, {"AbsGrad", BROADCAST_TYPE_ABSGRAD}, + {"Add", BROADCAST_TYPE_ADD}, {"FloorDiv", BROADCAST_TYPE_FLOORDIV}, {"AbsGrad", BROADCAST_TYPE_ABSGRAD}, {"Div", BROADCAST_TYPE_DIV}, {"DivNoNan", BROADCAST_TYPE_DIVNONAN}, }; diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc index 48f3505be7..2e0a47f3ab 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc +++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc @@ -1063,7 +1063,7 @@ size_t TbeKernelBuild::GetOptionalInput(const mindspore::CNodePtr &cnode, bool i std::string TbeKernelBuild::GetRealOpType(const std::string &origin_type) { static std::map buffer_fussion_op_map = { - {parallel::DEPTHWISE_CONV2D_NATIVE, parallel::DEPTHWISE_CONV2D}, {parallel::TENSOR_ADD, parallel::ADD}}; + {parallel::DEPTHWISE_CONV2D_NATIVE, parallel::DEPTHWISE_CONV2D}}; string result = origin_type; auto iter = buffer_fussion_op_map.find(origin_type); if (iter != buffer_fussion_op_map.end()) { diff --git a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc index 77c5bc1195..da885a4591 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/buffer_fusion/bnupdate_eltwise_eltwise_fusion_pass.cc @@ -99,7 +99,7 @@ void BnupdateEltwiseEltwiseFusionPass::MatchSingleFusionPattern(const session::K AnfAlgo::GetFusionType(cnode) == 
kernel::FusionType::ELEMWISE && CheckEltwiseInputAndOutputSize(cnode)) { auto eltwise_input = cnode->input(1); MS_EXCEPTION_IF_NULL(eltwise_input); - if (eltwise_input->isa() && AnfAlgo::CheckPrimitiveType(eltwise_input, prim::kPrimTensorAdd)) { + if (eltwise_input->isa() && AnfAlgo::CheckPrimitiveType(eltwise_input, prim::kPrimAdd)) { MatchBnupdateAddRelu(cnode, eltwise_input, kernel_graph, candidate_fusion); } } diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.cc index 8ab01e132d..81ed138356 100644 --- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.cc +++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.cc @@ -28,7 +28,7 @@ const BaseRef AdamApplyOneFusion::DefinePattern() const { VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); - VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})}); } @@ -41,7 +41,7 @@ const BaseRef AdamApplyOneCond1Fusion::DefinePattern() const { VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); - VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, add2_y_, sqrt0})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})}); } @@ -54,7 +54,7 @@ const BaseRef AdamApplyOneCond2Fusion::DefinePattern() const { VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); - VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})}); } @@ -67,7 +67,7 @@ const BaseRef AdamApplyOneCond3Fusion::DefinePattern() const { VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); - VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})}); } @@ -80,7 +80,7 @@ const BaseRef AdamApplyOneCond4Fusion::DefinePattern() const { VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = 
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, add2_y_, sqrt0})});
   return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})});
 }
@@ -94,7 +94,7 @@ const BaseRef AdamApplyOneAssignFusion::DefinePattern() const {
   VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]});
   VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})});
   VectorRef sub0 = VectorRef({sub0_var_, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input_vars_[3], sub0});
   VectorRef depend0 = VectorRef({prim::kPrimDepend, sub0, assign0});
@@ -114,7 +114,7 @@ const BaseRef AdamApplyOneAssignCond1Fusion::DefinePattern() const {
   VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]});
   VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, add2_y_, sqrt0})});
   VectorRef sub0 = VectorRef({sub0_var_, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input_vars_[3], sub0});
   VectorRef depend0 = VectorRef({prim::kPrimDepend, sub0, assign0});
@@ -134,7 +134,7 @@ const BaseRef AdamApplyOneAssignCond2Fusion::DefinePattern() const {
   VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]});
   VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})});
   VectorRef sub0 = VectorRef({sub0_var_, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input_vars_[3], sub0});
   VectorRef depend0 = VectorRef({prim::kPrimDepend, sub0, assign0});
@@ -154,7 +154,7 @@ const BaseRef AdamApplyOneAssignCond3Fusion::DefinePattern() const {
   VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]});
   VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, sqrt0, add2_y_})});
   VectorRef sub0 = VectorRef({sub0_var_, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input_vars_[3], sub0});
   VectorRef depend0 = VectorRef({prim::kPrimDepend, sub0, assign0});
@@ -174,7 +174,7 @@ const BaseRef AdamApplyOneAssignCond4Fusion::DefinePattern() const {
   VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]});
   VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
-  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})});
+  VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimAdd, add2_y_, sqrt0})});
   VectorRef sub0 = VectorRef({sub0_var_, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input_vars_[3], sub0});
   VectorRef depend0 = VectorRef({prim::kPrimDepend, sub0, assign0});
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.h
index 4c5649f978..3314d60402 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_fusion.h
@@ -38,8 +38,8 @@ class AdamApplyOneFusion : public PatternProcessPass {
       mul_x_input_vars_.push_back(std::make_shared());
     }
     add2_y_ = std::make_shared();
-    add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
-    add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
+    add0_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
+    add1_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
     sub0_var_ = std::make_shared(std::make_shared(prim::kPrimSub->name()));
   }
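All AdamApplyOne variants above match one and the same arithmetic, differing only in operand order. Written with Python-level ops, the matched subgraph is roughly the following; the function and variable names here are illustrative, not the pass's own:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    add, mul, real_div, sqrt, sub = P.Add(), P.Mul(), P.RealDiv(), P.Sqrt(), P.Sub()

    def adam_apply_one(grad, m, v, param, lr, beta1, beta2, eps):
        """Sketch of the subgraph AdamApplyOneFusion matches."""
        add0 = add(mul(beta1, m), mul(1.0 - beta1, grad))             # new momentum
        add1 = add(mul(beta2, v), mul(1.0 - beta2, mul(grad, grad)))  # new variance
        true_div0 = real_div(add0, add(sqrt(add1), eps))              # update step
        return sub(param, mul(lr, true_div0))                         # new parameter

    g = Tensor(np.ones((2, 2), np.float32))
    print(adam_apply_one(g, g, g, g, 0.001, 0.9, 0.999, 1e-8))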
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc
index 6c60342461..b2e4f40e03 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc
@@ -59,10 +59,10 @@ const BaseRef AdamApplyOneWithDecayRuleCond1::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, add2_y_, sqrt0});
+  VectorRef add2({prim::kPrimAdd, add2_y_, sqrt0});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, input4_, add3});
   VectorRef sub0({prim::kPrimSub, input3_, mul5});
   return sub0;
@@ -79,10 +79,10 @@ const BaseRef AdamApplyOneWithDecayRuleCond2::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, input3_, mul4_x_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({prim::kPrimSub, input3_, mul5});
   return sub0;
@@ -99,10 +99,10 @@ const BaseRef AdamApplyOneWithDecayRuleCond3::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, square0, mul3_x_});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({prim::kPrimSub, input3_, mul5});
   return sub0;
@@ -119,10 +119,10 @@ const BaseRef AdamApplyOneWithDecayRuleCond4::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, add2_y_, sqrt0});
+  VectorRef add2({prim::kPrimAdd, add2_y_, sqrt0});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({prim::kPrimSub, input3_, mul5});
   return sub0;
@@ -139,10 +139,10 @@ const BaseRef AdamApplyOneWithDecayRuleCond5::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({prim::kPrimSub, input3_, mul5});
   return sub0;
@@ -159,10 +159,10 @@ const BaseRef AdamApplyOneWithDecayAssignRuleCond1::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, add2_y_, sqrt0});
+  VectorRef add2({prim::kPrimAdd, add2_y_, sqrt0});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, input4_, add3});
   VectorRef sub0({sub0_var_, input3_, mul5});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input3_, sub0});
@@ -184,10 +184,10 @@ const BaseRef AdamApplyOneWithDecayAssignRuleCond2::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, input3_, mul4_x_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({sub0_var_, input3_, mul5});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input3_, sub0});
@@ -209,10 +209,10 @@ const BaseRef AdamApplyOneWithDecayAssignRuleCond3::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, square0, mul3_x_});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({sub0_var_, input3_, mul5});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input3_, sub0});
@@ -234,10 +234,10 @@ const BaseRef AdamApplyOneWithDecayAssignRuleCond4::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, add2_y_, sqrt0});
+  VectorRef add2({prim::kPrimAdd, add2_y_, sqrt0});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({sub0_var_, input3_, mul5});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input3_, sub0});
@@ -259,10 +259,10 @@ const BaseRef AdamApplyOneWithDecayAssignRuleCond5::DefinePattern() const {
   VectorRef mul3({prim::kPrimMul, mul3_x_, square0});
   VectorRef add1({add1_var_, mul2, mul3});
   VectorRef sqrt0({sqrt, add1});
-  VectorRef add2({prim::kPrimTensorAdd, sqrt0, add2_y_});
+  VectorRef add2({prim::kPrimAdd, sqrt0, add2_y_});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input3_});
   VectorRef real_div0({real_div, add0, add2});
-  VectorRef add3({prim::kPrimTensorAdd, mul4, real_div0});
+  VectorRef add3({prim::kPrimAdd, mul4, real_div0});
   VectorRef mul5({prim::kPrimMul, add3, input4_});
   VectorRef sub0({sub0_var_, input3_, mul5});
   VectorRef assign0 = VectorRef({prim::kPrimAssign, input3_, sub0});
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.h
index e43c5ad496..650c8b375e 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/adam_apply_one_with_decay_rule.h
@@ -38,8 +38,8 @@ class AdamApplyOneWithDecayRule : public PatternProcessPass {
     mul3_x_ = std::make_shared();
     mul4_x_ = std::make_shared();
     add2_y_ = std::make_shared();
-    add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
-    add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
+    add0_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
+    add1_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
     sub0_var_ = std::make_shared(std::make_shared(prim::kPrimSub->name()));
   }
   ~AdamApplyOneWithDecayRule() override = default;
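The WithDecay rules differ from the plain AdamApplyOne patterns only by the decay term (mul4 in the patterns, decay * param below) that is added into the update before it is scaled by the learning rate. Continuing the sketch above, with the same caveat that names are illustrative:

    def adam_apply_one_with_decay(grad, m, v, param, lr, beta1, beta2, eps, decay):
        """Sketch of the subgraph AdamApplyOneWithDecayRule matches."""
        add0 = add(mul(beta1, m), mul(1.0 - beta1, grad))
        add1 = add(mul(beta2, v), mul(1.0 - beta2, mul(grad, grad)))
        add2 = add(sqrt(add1), eps)
        add3 = add(mul(decay, param), real_div(add0, add2))  # decay joins the update
        return sub(param, mul(lr, add3))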
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc
index 3433d5305c..c487b6a3cb 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.cc
@@ -130,11 +130,11 @@ const BaseRef LambNextMVRuleCond1::DefinePattern() const {
   auto real_div0 = VectorRef({real_div0_var_, add0, input5_});
   auto real_div1 = VectorRef({real_div1_var_, add1, input2_});
-  auto add2 = VectorRef({prim::kPrimTensorAdd, add2_y_, real_div1});
+  auto add2 = VectorRef({prim::kPrimAdd, add2_y_, real_div1});
   auto sqrt0 = VectorRef({prim_rsqrt, add2});
   auto real_div2 = VectorRef({real_div2_var_, sqrt0, real_div0});
-  return VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  return VectorRef({prim::kPrimAdd, mul4, real_div2});
 }
 
 BaseRef LambNextMVRuleCond1::DefineAnotherPattern() const {
@@ -147,7 +147,7 @@ BaseRef LambNextMVRuleCond1::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt1});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, add2_y_, sqrt1});
   VectorRef real_div4 = VectorRef({prim_real_div, real_div0, add4});
   return real_div4;
 }
@@ -166,11 +166,11 @@ const BaseRef LambNextMVRuleCond2::DefinePattern() const {
   auto real_div0 = VectorRef({real_div0_var_, add0, input5_});
   auto real_div1 = VectorRef({real_div1_var_, add1, input2_});
-  auto add2 = VectorRef({prim::kPrimTensorAdd, add2_y_, real_div1});
+  auto add2 = VectorRef({prim::kPrimAdd, add2_y_, real_div1});
   auto sqrt0 = VectorRef({prim_rsqrt, add2});
   auto real_div2 = VectorRef({real_div2_var_, sqrt0, real_div0});
-  return VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  return VectorRef({prim::kPrimAdd, mul4, real_div2});
 }
 
 BaseRef LambNextMVRuleCond2::DefineAnotherPattern() const {
@@ -183,7 +183,7 @@ BaseRef LambNextMVRuleCond2::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, add2_y_});
   VectorRef real_div4 = VectorRef({prim_real_div, real_div0, add4});
   return real_div4;
 }
@@ -202,11 +202,11 @@ const BaseRef LambNextMVRuleCond3::DefinePattern() const {
   auto real_div0 = VectorRef({real_div0_var_, add0, input5_});
   auto real_div1 = VectorRef({real_div1_var_, add1, input2_});
-  auto add2 = VectorRef({prim::kPrimTensorAdd, real_div1, add2_y_});
+  auto add2 = VectorRef({prim::kPrimAdd, real_div1, add2_y_});
   auto sqrt0 = VectorRef({prim_rsqrt, add2});
   auto real_div2 = VectorRef({real_div2_var_, sqrt0, real_div0});
-  return VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  return VectorRef({prim::kPrimAdd, mul4, real_div2});
 }
 
 BaseRef LambNextMVRuleCond3::DefineAnotherPattern() const {
@@ -219,7 +219,7 @@ BaseRef LambNextMVRuleCond3::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, add2_y_});
   VectorRef real_div4 = VectorRef({prim_real_div, real_div0, add4});
   return real_div4;
 }
@@ -238,11 +238,11 @@ const BaseRef LambNextMVRuleCond4::DefinePattern() const {
   auto real_div0 = VectorRef({real_div0_var_, add0, input5_});
   auto real_div1 = VectorRef({real_div1_var_, add1, input2_});
-  auto add2 = VectorRef({prim::kPrimTensorAdd, real_div1, add2_y_});
+  auto add2 = VectorRef({prim::kPrimAdd, real_div1, add2_y_});
   auto sqrt0 = VectorRef({prim_rsqrt, add2});
   auto real_div2 = VectorRef({real_div2_var_, real_div0, sqrt0});
-  return VectorRef({prim::kPrimTensorAdd, real_div2, mul4});
+  return VectorRef({prim::kPrimAdd, real_div2, mul4});
 }
 
 BaseRef LambNextMVRuleCond4::DefineAnotherPattern() const {
@@ -255,7 +255,7 @@ BaseRef LambNextMVRuleCond4::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, add2_y_});
   VectorRef real_div4 = VectorRef({prim_real_div, real_div0, add4});
   return real_div4;
 }
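The Cond1..Cond4 variants exist because Add is commutative but the matcher is purely structural: Add(x, y) and Add(y, x) are different graphs to it, so each operand ordering the frontend may emit needs its own rule. Numerically the orderings are interchangeable, as this illustrative snippet shows:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    eps = Tensor(np.float32(1e-6))
    v = Tensor(np.ones((2,), np.float32))

    # Identical values, structurally distinct graphs; the Cond variants differ
    # exactly by which ordering of Adds like these they expect.
    a = P.Add()(eps, v)  # the (add2_y_, real_div1) ordering
    b = P.Add()(v, eps)  # the (real_div1, add2_y_) ordering
    print(a, b)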
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h
index 36389dbb04..a160d6989e 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_rule.h
@@ -49,8 +49,8 @@ class LambNextMVRule : public MultipleOutputPatternProcessPass {
     real_div0_var_ = std::make_shared(std::make_shared(kRealDivOpName));
     real_div1_var_ = std::make_shared(std::make_shared(kRealDivOpName));
     real_div2_var_ = std::make_shared(std::make_shared(prim::kPrimMul->name()));
-    add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
-    add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
+    add0_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
+    add1_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
   }
   ~LambNextMVRule() override = default;
   const BaseRef DefinePattern() const override = 0;
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.cc
index 78c265da80..f1a840adad 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.cc
@@ -124,10 +124,10 @@ BaseRef LambNextMVWithDecayRuleCond1::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef mul4 = VectorRef({mul4_var_, Zs});
-  VectorRef add2 = VectorRef({prim::kPrimTensorAdd, constant_add2_y_, real_div1});
+  VectorRef add2 = VectorRef({prim::kPrimAdd, constant_add2_y_, real_div1});
   VectorRef sqrt0 = VectorRef({prim_rsqrt, add2});
   VectorRef real_div2 = VectorRef({prim::kPrimMul, sqrt0, real_div0});
-  VectorRef add3 = VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  VectorRef add3 = VectorRef({prim::kPrimAdd, mul4, real_div2});
   return add3;
 }
@@ -141,14 +141,14 @@ const BaseRef LambNextMVWithDecayRuleCond1::DefinePattern() const {
   VectorRef add1 = VectorRef({add1_var_, mul2, mul3});
   VectorRef real_div1 = VectorRef({real_div1_var_, add1, input_vars_[2]});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, constant_add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, constant_add2_y_});
   VectorRef mul0 = VectorRef({prim::kPrimMul, input_vars_[4], constant_mul_input_vars_[0]});
   VectorRef mul1 = VectorRef({prim::kPrimMul, input_vars_[3], constant_mul_input_vars_[1]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
   VectorRef real_div0 = VectorRef({real_div0_var_, add0, input_vars_[5]});
   VectorRef real_div4 = VectorRef({prim_deal_div, real_div0, add4});
   VectorRef mul4 = VectorRef({mul4_var_, constant_mul_input_vars_[4], input_vars_[6]});
-  VectorRef add5 = VectorRef({prim::kPrimTensorAdd, mul4, real_div4});
+  VectorRef add5 = VectorRef({prim::kPrimAdd, mul4, real_div4});
   return add5;
 }
@@ -165,10 +165,10 @@ BaseRef LambNextMVWithDecayRuleCond2::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef mul4 = VectorRef({mul4_var_, Zs});
-  VectorRef add2 = VectorRef({prim::kPrimTensorAdd, constant_add2_y_, real_div1});
+  VectorRef add2 = VectorRef({prim::kPrimAdd, constant_add2_y_, real_div1});
   VectorRef sqrt0 = VectorRef({prim_rsqrt, add2});
   VectorRef real_div2 = VectorRef({prim::kPrimMul, sqrt0, real_div0});
-  VectorRef add3 = VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  VectorRef add3 = VectorRef({prim::kPrimAdd, mul4, real_div2});
   return add3;
 }
@@ -182,14 +182,14 @@ const BaseRef LambNextMVWithDecayRuleCond2::DefinePattern() const {
   VectorRef add1 = VectorRef({add1_var_, mul2, mul3});
   VectorRef real_div1 = VectorRef({real_div1_var_, add1, input_vars_[2]});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, constant_add2_y_, sqrt1});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, constant_add2_y_, sqrt1});
   VectorRef mul0 = VectorRef({prim::kPrimMul, constant_mul_input_vars_[0], input_vars_[4]});
   VectorRef mul1 = VectorRef({prim::kPrimMul, constant_mul_input_vars_[1], input_vars_[3]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
   VectorRef real_div0 = VectorRef({real_div0_var_, add0, input_vars_[5]});
   VectorRef real_div4 = VectorRef({prim_deal_div, real_div0, add4});
   VectorRef mul4 = VectorRef({mul4_var_, constant_mul_input_vars_[4], input_vars_[6]});
-  VectorRef add5 = VectorRef({prim::kPrimTensorAdd, mul4, real_div4});
+  VectorRef add5 = VectorRef({prim::kPrimAdd, mul4, real_div4});
   return add5;
 }
@@ -206,10 +206,10 @@ BaseRef LambNextMVWithDecayRuleCond3::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef mul4 = VectorRef({mul4_var_, Zs});
-  VectorRef add2 = VectorRef({prim::kPrimTensorAdd, real_div1, constant_add2_y_});
+  VectorRef add2 = VectorRef({prim::kPrimAdd, real_div1, constant_add2_y_});
   VectorRef sqrt0 = VectorRef({prim_rsqrt, add2});
   VectorRef real_div2 = VectorRef({prim::kPrimMul, sqrt0, real_div0});
-  VectorRef add3 = VectorRef({prim::kPrimTensorAdd, mul4, real_div2});
+  VectorRef add3 = VectorRef({prim::kPrimAdd, mul4, real_div2});
   return add3;
 }
@@ -223,14 +223,14 @@ const BaseRef LambNextMVWithDecayRuleCond3::DefinePattern() const {
   VectorRef add1 = VectorRef({add1_var_, mul2, mul3});
   VectorRef real_div1 = VectorRef({real_div1_var_, add1, input_vars_[2]});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, constant_add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, constant_add2_y_});
   VectorRef mul0 = VectorRef({prim::kPrimMul, input_vars_[4], constant_mul_input_vars_[0]});
   VectorRef mul1 = VectorRef({prim::kPrimMul, input_vars_[3], constant_mul_input_vars_[1]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
   VectorRef real_div0 = VectorRef({real_div0_var_, add0, input_vars_[5]});
   VectorRef real_div4 = VectorRef({prim_deal_div, real_div0, add4});
   VectorRef mul4 = VectorRef({mul4_var_, input_vars_[6], constant_mul_input_vars_[4]});
-  VectorRef add5 = VectorRef({prim::kPrimTensorAdd, mul4, real_div4});
+  VectorRef add5 = VectorRef({prim::kPrimAdd, mul4, real_div4});
   return add5;
 }
@@ -248,10 +248,10 @@ BaseRef LambNextMVWithDecayRuleCond4::DefineAnotherPattern() const {
   VectorRef real_div1 = VectorRef({real_div1_var_, Ys});
   VectorRef mul4 = VectorRef({mul4_var_, Zs});
-  VectorRef add2 = VectorRef({prim::kPrimTensorAdd, real_div1, constant_add2_y_});
+  VectorRef add2 = VectorRef({prim::kPrimAdd, real_div1, constant_add2_y_});
   VectorRef sqrt0 = VectorRef({prim_rsqrt, add2});
   VectorRef real_div2 = VectorRef({prim::kPrimMul, real_div0, sqrt0});
-  VectorRef add3 = VectorRef({prim::kPrimTensorAdd, real_div2, mul4});
+  VectorRef add3 = VectorRef({prim::kPrimAdd, real_div2, mul4});
   return add3;
 }
@@ -265,14 +265,14 @@ const BaseRef LambNextMVWithDecayRuleCond4::DefinePattern() const {
   VectorRef add1 = VectorRef({add1_var_, mul2, mul3});
   VectorRef real_div1 = VectorRef({real_div1_var_, add1, input_vars_[2]});
   VectorRef sqrt1 = VectorRef({prim_sqrt, real_div1});
-  VectorRef add4 = VectorRef({prim::kPrimTensorAdd, sqrt1, constant_add2_y_});
+  VectorRef add4 = VectorRef({prim::kPrimAdd, sqrt1, constant_add2_y_});
   VectorRef mul0 = VectorRef({prim::kPrimMul, constant_mul_input_vars_[0], input_vars_[4]});
   VectorRef mul1 = VectorRef({prim::kPrimMul, constant_mul_input_vars_[1], input_vars_[3]});
   VectorRef add0 = VectorRef({add0_var_, mul0, mul1});
   VectorRef real_div0 = VectorRef({real_div0_var_, add0, input_vars_[5]});
   VectorRef real_div4 = VectorRef({prim_deal_div, real_div0, add4});
   VectorRef mul4 = VectorRef({mul4_var_, constant_mul_input_vars_[4], input_vars_[6]});
-  VectorRef add5 = VectorRef({prim::kPrimTensorAdd, real_div4, mul4});
+  VectorRef add5 = VectorRef({prim::kPrimAdd, real_div4, mul4});
   return add5;
 }
 }  // namespace opt
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.h
index ca76d82678..9ed6e56753 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_rule.h
@@ -38,8 +38,8 @@ class LambNextMVWithDecayRule : public MultipleOutputPatternProcessPass {
     mul4_var_ = std::make_shared(std::make_shared(prim::kPrimMul->name()));
     real_div0_var_ = std::make_shared(std::make_shared(kRealDivOpName));
     real_div1_var_ = std::make_shared(std::make_shared(kRealDivOpName));
-    add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
-    add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()));
+    add0_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
+    add1_var_ = std::make_shared(std::make_shared(prim::kPrimAdd->name()));
   }
 
   ~LambNextMVWithDecayRule() override = default;
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
index 1ad0db72b9..313e88af3f 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_mv_with_decay_v1_rule.cc
@@ -66,7 +66,7 @@ bool MatchAdd5Pattern(const AnfNodePtr &node, const AnfNodePtr &mul4, const AnfN
     return false;
   }
   auto add5 = node->cast();
-  if (AnfAlgo::GetCNodeName(add5) != prim::kPrimTensorAdd->name() || add5->inputs().size() != kAddInputNum) {
+  if (AnfAlgo::GetCNodeName(add5) != prim::kPrimAdd->name() || add5->inputs().size() != kAddInputNum) {
     return false;
   }
   auto real_div4_anf = add5->input(1);
@@ -82,7 +82,7 @@ bool MatchAdd5Pattern(const AnfNodePtr &node, const AnfNodePtr &mul4, const AnfN
     return false;
   }
   auto add4 = add4_anf->cast();
-  if (AnfAlgo::GetCNodeName(add4) != prim::kPrimTensorAdd->name() || add4->inputs().size() != kAddInputNum) {
+  if (AnfAlgo::GetCNodeName(add4) != prim::kPrimAdd->name() || add4->inputs().size() != kAddInputNum) {
     return false;
   }
   auto sqrt1_anf = add4->input(1);
@@ -140,17 +140,17 @@ const BaseRef LambNextMVWithDecayV1Rule::DefinePattern() const {
   const auto prim_real_div = std::make_shared(kRealDivOpName);
   VectorRef mul3({prim::kPrimMul, mul3_sub1_, input0_});
   VectorRef mul2({prim::kPrimMul, mul2_x_, input1_});
-  VectorRef add1({prim::kPrimTensorAdd, mul2, mul3});
+  VectorRef add1({prim::kPrimAdd, mul2, mul3});
   VectorRef real_div1({prim_real_div, add1, input2_});
-  VectorRef add2({prim::kPrimTensorAdd, real_div1, add2_y_});
+  VectorRef add2({prim::kPrimAdd, real_div1, add2_y_});
   VectorRef mul0({prim::kPrimMul, mul0_x_, input4_});
   VectorRef mul1({prim::kPrimMul, mul1_sub_, input3_});
   VectorRef sqrt0({prim_rsqrt, add2});
-  VectorRef add0({prim::kPrimTensorAdd, mul0, mul1});
+  VectorRef add0({prim::kPrimAdd, mul0, mul1});
   VectorRef real_div0({prim_real_div, add0, input5_});
   VectorRef real_div2({prim::kPrimMul, real_div0, sqrt0});
   VectorRef mul4({prim::kPrimMul, mul4_x_, input6_});
-  VectorRef add3({prim::kPrimTensorAdd, real_div2, mul4});
+  VectorRef add3({prim::kPrimAdd, real_div2, mul4});
   return add3;
 }
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.cc
index 08de5e17c1..e9dd085b42 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.cc
@@ -54,7 +54,7 @@ const BaseRef LambNextRightRule::DefinePattern() const {
   VectorRef mul3 = VectorRef({prim::kPrimMul, mul3_x_, VectorRef({prim::kPrimSquare, input0_})});
   VectorRef add1 = VectorRef({add1_var_, VectorRef({prim::kPrimMul, mul2_x_, input1_}), mul3});
   return VectorRef(
-    {prim::kPrimTensorAdd, VectorRef({prim_sqrt, VectorRef({prim::kPrimMul, add1, true_div1_recip_})}), add2_y_});
+    {prim::kPrimAdd, VectorRef({prim_sqrt, VectorRef({prim::kPrimMul, add1, true_div1_recip_})}), add2_y_});
 }
 
 const AnfNodePtr LambNextRightRule::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.h b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.h
index e13cab3f7f..db92b9f16a 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.h
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/lamb_next_right_rule.h
@@ -32,7 +32,7 @@ class LambNextRightRule : public PatternProcessPass {
         mul3_x_(std::make_shared()),
         true_div1_recip_(std::make_shared()),
         add2_y_(std::make_shared()),
-        add1_var_(std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()))) {}
+        add1_var_(std::make_shared(std::make_shared(prim::kPrimAdd->name()))) {}
   ~LambNextRightRule() override = default;
 
   const BaseRef DefinePattern() const override;
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc
index c0b9f63413..e6e146f4a0 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/mul_add_fusion.cc
@@ -58,7 +58,7 @@ bool GetMul(const FuncGraphPtr &graph, const CNodePtr &add, CNodePtr *mul, size_
 const BaseRef MulAddFusion::DefinePattern() const {
   VarPtr x = std::make_shared();
   VarPtr y = std::make_shared();
-  VectorRef pattern({prim::kPrimTensorAdd, x, y});
+  VectorRef pattern({prim::kPrimAdd, x, y});
   return pattern;
 }
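MulAddFusion anchors its pattern on a bare Add(x, y) and only afterwards (in GetMul, partially shown above) checks whether one input is a Mul, so any a*b + c expression is a fusion candidate. The eager-mode shape of the graph it targets, as an illustrative sketch:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    a = Tensor(np.ones((2, 2), np.float32))
    b = Tensor(np.full((2, 2), 3.0, np.float32))
    c = Tensor(np.ones((2, 2), np.float32))

    # a*b + c: the Add node is the pattern root; the pass then inspects its
    # inputs for a Mul to rewrite the pair into a fused multiply-add.
    print(P.Add()(P.Mul()(a, b), c))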
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/adam_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/adam_fusion.cc
index d9194950b4..a170d2002c 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/adam_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/adam_fusion.cc
@@ -51,13 +51,13 @@ kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
 }  // namespace
 
 const BaseRef AdamFusion::DefinePattern() const {
-  VectorRef next_m = VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, beta1_, m_}),
-                                VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
+  VectorRef next_m = VectorRef(
+    {prim::kPrimAdd, VectorRef({prim::kPrimMul, beta1_, m_}), VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
   VectorRef next_v =
-    VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, beta2_, v_}),
+    VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta2_, v_}),
               VectorRef({prim::kPrimMul, one_sub_beta2_, VectorRef({prim::kPrimSquare, gradient_})})});
-  VectorRef update = VectorRef(
-    {prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimTensorAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
+  VectorRef update =
+    VectorRef({prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
   VectorRef update_with_lr = VectorRef({prim::kPrimMul, lr_, update});
   VectorRef next_param = VectorRef({prim::kPrimSub, param_, update_with_lr});
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/adam_weight_decay_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/adam_weight_decay_fusion.cc
index f0f4ac6f36..89cefe9f1b 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/adam_weight_decay_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/adam_weight_decay_fusion.cc
@@ -51,14 +51,14 @@ kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
 }  // namespace
 
 const BaseRef AdamWeightDecayFusion::DefinePattern() const {
-  VectorRef next_m = VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, beta1_, m_}),
-                                VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
+  VectorRef next_m = VectorRef(
+    {prim::kPrimAdd, VectorRef({prim::kPrimMul, beta1_, m_}), VectorRef({prim::kPrimMul, one_sub_beta1_, gradient_})});
   VectorRef next_v =
-    VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, beta2_, v_}),
+    VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, beta2_, v_}),
               VectorRef({prim::kPrimMul, one_sub_beta2_, VectorRef({prim::kPrimSquare, gradient_})})});
-  VectorRef update = VectorRef(
-    {prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimTensorAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
-  VectorRef new_update = VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, weight_decay_, param_}), update});
+  VectorRef update =
+    VectorRef({prim::kPrimRealDiv, next_m, VectorRef({prim::kPrimAdd, eps_, VectorRef({prim::kPrimSqrt, next_v})})});
+  VectorRef new_update = VectorRef({prim::kPrimAdd, VectorRef({prim::kPrimMul, weight_decay_, param_}), update});
   VectorRef update_with_lr = VectorRef({prim::kPrimMul, lr_, new_update});
   VectorRef next_param = VectorRef({prim::kPrimSub, param_, update_with_lr});
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/add_relu_grad_v2_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/add_relu_grad_v2_fusion.cc
index c7e05c6b9c..e35f97fa6e 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/add_relu_grad_v2_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/add_relu_grad_v2_fusion.cc
@@ -51,7 +51,7 @@ kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
 }  // namespace
 
 const BaseRef AddReluGradV2Fusion::DefinePattern() const {
-  VectorRef relu_grad = VectorRef({prim::kPrimReluGradV2, VectorRef({prim::kPrimTensorAdd, x1_, x2_}), mask_});
+  VectorRef relu_grad = VectorRef({prim::kPrimReluGradV2, VectorRef({prim::kPrimAdd, x1_, x2_}), mask_});
   return relu_grad;
 }
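AddReluGradV2Fusion here, and AddReluV2Fusion just below, collapse an elementwise Add feeding a ReLU (or its gradient) into a single kernel. The forward pattern in eager form, as a sketch:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P

    x1 = Tensor(np.array([-1.0, 2.0], np.float32))
    x2 = Tensor(np.array([0.5, -3.0], np.float32))

    # relu(x1 + x2): on GPU this Add-then-ReLU pair is what the passes rewrite.
    print(nn.ReLU()(P.Add()(x1, x2)))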
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/add_relu_v2_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/add_relu_v2_fusion.cc
index f78f0d87aa..f643b2869e 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/add_relu_v2_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/add_relu_v2_fusion.cc
@@ -51,7 +51,7 @@ kernel::KernelBuildInfoPtr GenerateKernelBuildInfo(CNodePtr node) {
 }  // namespace
 
 const BaseRef AddReluV2Fusion::DefinePattern() const {
-  VectorRef relu = VectorRef({prim::kPrimReluV2, VectorRef({prim::kPrimTensorAdd, x1_, x2_})});
+  VectorRef relu = VectorRef({prim::kPrimReluV2, VectorRef({prim::kPrimAdd, x1_, x2_})});
   return relu;
 }
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/batch_norm_add_relu_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/batch_norm_add_relu_fusion.cc
index 2cd9716112..54ed9967ba 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/batch_norm_add_relu_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/batch_norm_add_relu_fusion.cc
@@ -30,7 +30,7 @@ namespace opt {
 const BaseRef BatchNormAddReluFusion::DefinePattern() const {
   VectorRef batch_norm_ex = VectorRef({prim::kPrimFusedBatchNormEx, x_, scale_, bias_, mean_, var_});
   VectorRef tuple_get_item = VectorRef({prim::kPrimTupleGetItem, batch_norm_ex, index_});
-  VectorRef tensor_add = VectorRef({prim::kPrimTensorAdd, tuple_get_item, z_});
+  VectorRef tensor_add = VectorRef({prim::kPrimAdd, tuple_get_item, z_});
   VectorRef relu = VectorRef({prim::kPrimRelu, tensor_add});
   return relu;
 }
diff --git a/mindspore/ccsrc/backend/optimizer/gpu/replace_addn_fusion.cc b/mindspore/ccsrc/backend/optimizer/gpu/replace_addn_fusion.cc
index bd250bc8f8..d8ffe0f190 100644
--- a/mindspore/ccsrc/backend/optimizer/gpu/replace_addn_fusion.cc
+++ b/mindspore/ccsrc/backend/optimizer/gpu/replace_addn_fusion.cc
@@ -42,7 +42,7 @@ const AnfNodePtr ReplaceAddNFusion::Process(const FuncGraphPtr &graph, const Anf
   MS_EXCEPTION_IF_NULL(B);
   int64_t num_input = AnfAlgo::GetNodeAttr(node, "n");
   if (num_input == 2) {
-    auto prim = std::make_shared(prim::kPrimTensorAdd->name());
+    auto prim = std::make_shared(prim::kPrimAdd->name());
     MS_EXCEPTION_IF_NULL(prim);
     std::vector inputs = {NewValueNode(prim), A, B};
     auto add_new = graph->NewCNode(inputs);
diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc
index d6b16fd363..13084e7392 100644
--- a/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc
+++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.cc
@@ -47,7 +47,7 @@ AnfNodePtr NewCNodeWithInfo(const AnfNodePtrList &inputs, const AnfNodePtr &ori_
 }
 
 AnfNodePtr SimplifyAdd(const AnfNodePtr &node) {
-  if (!IsPrimitiveCNode(node, prim::kPrimTensorAdd)) {
+  if (!IsPrimitiveCNode(node, prim::kPrimAdd)) {
     return nullptr;
   }
   PatternNode x, y, z;
@@ -57,13 +57,13 @@ AnfNodePtr SimplifyAdd(const AnfNodePtr &node) {
   PConstant any_const_2(node);
 
   auto add_distri_lambda = [&node, &x, &y, &any_const]() -> AnfNodePtr {
-    auto node_tmp = NewCNodeWithInfo({NewValueNode(prim::kPrimTensorAdd), x.GetNode(node), y.GetNode(node)}, node);
+    auto node_tmp = NewCNodeWithInfo({NewValueNode(prim::kPrimAdd), x.GetNode(node), y.GetNode(node)}, node);
     auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimMul), node_tmp, any_const.GetNode(node)}, node);
     return new_cnode;
   };
   auto add_union_lambda = [&node, &x, &any_const, &any_const_2]() -> AnfNodePtr {
     auto new_rhs = any_const.AddByPatternConst(any_const_2, x.GetNode(node));
-    auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimTensorAdd), x.GetNode(node), new_rhs}, node);
+    auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimAdd), x.GetNode(node), new_rhs}, node);
     return new_cnode;
   };
   // A + 0 = A
@@ -88,7 +88,7 @@ AnfNodePtr SimplifySub(const AnfNodePtr &node) {
   PConstant any_const(node);
   auto sub_toadd_lambda = [&node, &x, &any_const]() -> AnfNodePtr {
     auto new_rhs = any_const.ValueNodeWithOprations(prim::kPrimNeg);
-    auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimTensorAdd), x.GetNode(node), new_rhs}, node);
+    auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimAdd), x.GetNode(node), new_rhs}, node);
     return new_cnode;
   };
   // A - 0 = A
@@ -269,7 +269,7 @@ AnfNodePtr SimplifyMul(const AnfNodePtr &node) {
     return new_cnode;
   };
   auto exp_merge_lambda = [&node, &x, &y]() -> AnfNodePtr {
-    auto node_tmp = NewCNodeWithInfo({NewValueNode(prim::kPrimTensorAdd), x.GetNode(node), y.GetNode(node)}, node);
+    auto node_tmp = NewCNodeWithInfo({NewValueNode(prim::kPrimAdd), x.GetNode(node), y.GetNode(node)}, node);
     auto new_cnode = NewCNodeWithInfo({NewValueNode(prim::kPrimExp), node_tmp}, node);
     return new_cnode;
   };
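SimplifyAdd's lambdas implement textbook rewrites: add_distri_lambda factors x*c + y*c into (x + y)*c, add_union_lambda folds (x + c1) + c2 into x + (c1 + c2), and SimplifyMul's exp_merge_lambda uses the same Add primitive for exp(x) * exp(y) = exp(x + y). The last identity, checked numerically (values are illustrative):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    add, mul, exp = P.Add(), P.Mul(), P.Exp()
    x = Tensor(np.array([0.5, 1.0], np.float32))
    y = Tensor(np.array([2.0, 0.1], np.float32))

    # exp(x) * exp(y) == exp(x + y): the rewrite replaces a Mul of two Exps
    # with a single Exp over an Add.
    print(mul(exp(x), exp(y)))
    print(exp(add(x, y)))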
diff --git a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc
index 5134c2269f..28c2ff8e2a 100644
--- a/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc
+++ b/mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.cc
@@ -741,14 +741,14 @@ std::string ExtractGraphKernelName(const AnfNodePtrList &cnodes, const string &p
 std::vector GetFusibleOpList() {
 #if ENABLE_D
   std::vector fusible_basic_ops = {
-    prim::kPrimAbs,        prim::kPrimRound,      prim::kPrimNeg,       prim::kPrimExp,       prim::kPrimTensorAdd,
+    prim::kPrimAbs,        prim::kPrimRound,      prim::kPrimNeg,       prim::kPrimExp,       prim::kPrimAdd,
     prim::kPrimExpandDims, prim::kPrimMul,        prim::kPrimMinimum,   prim::kPrimMaximum,   prim::kPrimLog,
     prim::kPrimPow,        prim::kPrimSub,        prim::kPrimRsqrt,     prim::kPrimSqrt,      prim::kPrimAddN,
     prim::kPrimEqual,      prim::kPrimReciprocal, prim::kPrimTanh,      prim::kPrimReshape,   prim::kPrimTranspose,
     prim::kPrimCast,       prim::kPrimRealDiv};
 #elif ENABLE_GPU
   std::vector fusible_basic_ops = {
-    prim::kPrimAbs,        prim::kPrimRound,      prim::kPrimNeg,       prim::kPrimExp,       prim::kPrimTensorAdd,
+    prim::kPrimAbs,        prim::kPrimRound,      prim::kPrimNeg,       prim::kPrimExp,       prim::kPrimAdd,
     prim::kPrimRealDiv,    prim::kPrimMul,        prim::kPrimMinimum,   prim::kPrimMaximum,   prim::kPrimLog,
     prim::kPrimPow,        prim::kPrimSub,        prim::kPrimRsqrt,     prim::kPrimSqrt,      prim::kPrimAddN,
     prim::kPrimEqual,      prim::kPrimReciprocal, prim::KPrimTransData, prim::kPrimSelect,    prim::kPrimGreater,
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass.cc b/mindspore/ccsrc/frontend/optimizer/irpass.cc
index 365567ef68..4978338271 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass.cc
+++ b/mindspore/ccsrc/frontend/optimizer/irpass.cc
@@ -52,7 +52,7 @@ namespace opt {
 namespace irpass {
 OptimizeIRPassLib::OptimizeIRPassLib() {
   arithmetic_simplify_ = MakeSubstitution(std::make_shared(), "arithmetic_simplify",
-                                          {prim::kPrimScalarAdd, prim::kPrimScalarMul, prim::kPrimTensorAdd,
+                                          {prim::kPrimScalarAdd, prim::kPrimScalarMul, prim::kPrimAdd,
                                            prim::kPrimIdentity, prim::kPrimMomentum, prim::kPrimMul, prim::kPrimPow});
   arithmetic_simplify2_ =
     MakeSubstitution(std::make_shared(), "arithmetic_simplify2", {prim::kPrimMul});
diff --git a/mindspore/ccsrc/frontend/optimizer/irpass/merge_addn.h b/mindspore/ccsrc/frontend/optimizer/irpass/merge_addn.h
index 84ad4789e4..38ca1748e4 100644
--- a/mindspore/ccsrc/frontend/optimizer/irpass/merge_addn.h
+++ b/mindspore/ccsrc/frontend/optimizer/irpass/merge_addn.h
@@ -272,7 +272,7 @@ class AddNEliminater : public AnfVisitor {
     if (tuple_inputs.size() == 3) {
       // case2: inputs size = 2, -> TensorAdd(Tensor, Tensor)
       MS_LOG(DEBUG) << "Replace AddN with two inputs with TensorAdd. " << cnode->DebugString(2);
-      ValuePtr prim_tensoradd = prim::GetPythonOps("TensorAdd", "mindspore.ops.operations");
+      ValuePtr prim_tensoradd = prim::GetPythonOps("Add", "mindspore.ops.operations");
       std::vector new_xs{func_graph->NewCNode({NewValueNode(prim_tensoradd)}), tuple_inputs[1],
                          tuple_inputs[2]};
       mng->Replace(node, func_graph->NewCNode(new_xs));
@@ -299,7 +299,7 @@ class AddNEliminater : public AnfVisitor {
       ValuePtr prim_addn = prim::GetPythonOps("AddN", "mindspore.ops.operations");
       auto new_addn = func_graph->NewCNode(
         {func_graph->NewCNode({NewValueNode(prim_addn)}), func_graph->NewCNode(make_tuple_new_xs)});
-      ValuePtr prim_tensoradd = prim::GetPythonOps("TensorAdd", "mindspore.ops.operations");
+      ValuePtr prim_tensoradd = prim::GetPythonOps("Add", "mindspore.ops.operations");
       auto new_add =
         func_graph->NewCNode({func_graph->NewCNode({NewValueNode(prim_tensoradd)}), *first_valuenode, new_addn});
       (void)mng->Replace(node, new_add);
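AddNEliminater fetches the replacement primitive by name through GetPythonOps, which is why the string literal must change here. The rewrite itself is behavior-preserving: AddN over exactly two tensors equals a single Add, e.g.:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    a = Tensor(np.array([1.0, 2.0], np.float32))
    b = Tensor(np.array([3.0, 4.0], np.float32))

    # The pass substitutes the first form with the second.
    print(P.AddN()((a, b)))
    print(P.Add()(a, b))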
diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
index 7ef922e0eb..316ce4188b 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_generate_strategy.cc
@@ -860,7 +860,7 @@ Strategys GenerateStrategiesFromStrategy(const std::vector
   if (ops[iter_ops]->type() == L2_NORMALIZE) {
     return PrepareL2Normalize(ops, iter_ops, basic_stra);
   }
-  if (ops[iter_ops]->type() == TENSOR_ADD || ops[iter_ops]->type() == SUB || ops[iter_ops]->type() == MUL ||
+  if (ops[iter_ops]->type() == ADD || ops[iter_ops]->type() == SUB || ops[iter_ops]->type() == MUL ||
       ops[iter_ops]->type() == DIV) {
     return CheckBroadcast(ops, iter_ops, basic_stra);
   }
diff --git a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h
index c852fd3517..042884a1de 100644
--- a/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h
+++ b/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/rec_parse_graph.h
@@ -78,7 +78,7 @@ const std::map DictOpType{
   // Elm-wise OP
   {TRANSPOSE, OperatorType::kRecElmWiseOp},
   {L2_NORMALIZE, OperatorType::kRecElmWiseOp},
-  {TENSOR_ADD, OperatorType::kRecElmWiseOp},
+  {ADD, OperatorType::kRecElmWiseOp},
   {TENSOR_DOT, OperatorType::kRecElmWiseOp},
   {SUB, OperatorType::kRecElmWiseOp},
   {MUL, OperatorType::kRecElmWiseOp},
diff --git a/mindspore/ccsrc/frontend/parallel/dynamic_creator.h b/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
index 6e4ea6d8f3..38bafa45d1 100644
--- a/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
+++ b/mindspore/ccsrc/frontend/parallel/dynamic_creator.h
@@ -86,7 +86,7 @@ REGISTER(LogSoftmaxInfo);
 REGISTER(ActivationInfo);
 REGISTER(SoftmaxCrossEntropyWithLogitsInfo);
 REGISTER(SubInfo);
-REGISTER(TensorAddInfo);
+REGISTER(AddInfo);
 REGISTER(BiasAddInfo);
 REGISTER(MulInfo);
 REGISTER(DivInfo);
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h
index 38ce29f543..8a7345aa77 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/arithmetic_info.h
@@ -60,12 +60,11 @@ class SubInfo : public ArithmeticBase {
   ~SubInfo() override = default;
 };
 
-class TensorAddInfo : public ArithmeticBase {
+class AddInfo : public ArithmeticBase {
  public:
-  TensorAddInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape,
-                const PrimitiveAttrs &attrs)
+  AddInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs)
     : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {}
-  ~TensorAddInfo() override = default;
+  ~AddInfo() override = default;
 };
 
 class MulInfo : public ArithmeticBase {
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.cc b/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.cc
index 8a700fb66a..4b3ca7ecb9 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.cc
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/onehot_info.cc
@@ -191,7 +191,7 @@ Status OneHotInfo::ComputeReplaceGraph(const CNodePtr &cnode) {
   auto equal = gen_g.PushBack({gen_g.NewOpInst(EQUAL), floor_div, CreateInt32Tensor(mod_rank_)});
   auto cast = gen_g.PushBack({gen_g.NewOpInst(CAST), equal, CreatTypeInt(32)});
   auto mul2 = gen_g.PushBack({gen_g.NewOpInst(MUL), sub1, cast});
-  auto tensor_add = gen_g.PushBack({gen_g.NewOpInst(TENSOR_ADD), mul2, CreateInt32Tensor(1)});
+  auto tensor_add = gen_g.PushBack({gen_g.NewOpInst(ADD), mul2, CreateInt32Tensor(1)});
   auto mul3 = gen_g.PushBack({gen_g.NewOpInst(MUL), cast, tensor_add});
   auto sub2 = gen_g.PushBack({gen_g.NewOpInst(SUB), mul3, CreateInt32Tensor(1)});
   Attr attr_onehot_axis = std::make_pair(AXIS, axis_value_ptr_);
diff --git a/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
index 4d8c22b642..d1f0fcab12 100644
--- a/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
+++ b/mindspore/ccsrc/frontend/parallel/ops_info/ops_utils.h
@@ -200,7 +200,7 @@ constexpr char MAXPOOLV2[] = "MaxPoolV2";
 constexpr char L2_NORMALIZE[] = "L2Normalize";
 constexpr char TRANSPOSE[] = "Transpose";
 constexpr char RESHAPE[] = "Reshape";
-constexpr char TENSOR_ADD[] = "TensorAdd";
+constexpr char ADD[] = "Add";
 constexpr char BIAS_ADD[] = "BiasAdd";
 constexpr char SUB[] = "Sub";
 constexpr char MUL[] = "Mul";
@@ -315,7 +315,6 @@ constexpr char UNSORTED_SEGMENT_MIN[] = "UnsortedSegmentMin";
 constexpr char UNSORTED_SEGMENT_MAX[] = "UnsortedSegmentMax";
 constexpr char DEPTHWISE_CONV2D_NATIVE[] = "DepthwiseConv2dNative";
 constexpr char DEPTHWISE_CONV2D[] = "DepthwiseConv2D";
-constexpr char ADD[] = "Add";
 constexpr char DROPOUT[] = "Dropout";
 constexpr char KStridedSlice[] = "StridedSlice";
 constexpr char UNIQUE[] = "Unique";
diff --git a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
index 2035feea57..93de7b4565 100644
--- a/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
+++ b/mindspore/ccsrc/frontend/parallel/step_auto_parallel.cc
@@ -151,7 +151,7 @@ bool IsSplittableOperator(const std::string &op_name) {
   // clang-format off
   static const std::set splittable_op =
     {MATMUL, TRANSPOSE, GELU, TANH, SOFTMAX, SUB, MUL, DIV, RESHAPE, GREATER, LOG_SOFTMAX, ACTIVATION, PRELU,
-     FLOORDIV, L2_NORMALIZE, TENSOR_ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
+     FLOORDIV, L2_NORMALIZE, ADD, MAXPOOL, MAXPOOLV2, VIRTUAL_DATA_SET, RELU, ONEHOT, DROPOUT_DO_MASK,
      REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, ARGMINWITHVALUE, REDUCE_SUM, CONV2D, FUSE_BATCH_NORM, POOLING,
      MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, BATCH_NORM, LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, ACOS, EXP,
      PACK, LOG, REDUCE_MEAN, REAL_DIV, SIGMOID, POW, MAXIMUM, MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, GATHERV2, SQRT, CONCAT,
diff --git a/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc b/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc
index 2381cd305c..572058555b 100644
--- a/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc
+++ b/mindspore/ccsrc/transform/express_ir/onnx_exporter.cc
@@ -165,7 +165,7 @@ class OpNameInfo {
 #define OPERATOR_ONNX_CONVERT_DEFINE(name, onnx_name, impl) \
   OpNameInfo GetOpOnnxConvertInfo_##name() { return impl.set_op_type(#name).set_onnx_type(#onnx_name); }
 
-OPERATOR_ONNX_CONVERT_DEFINE(TensorAdd, Add, OpNameInfo())
+OPERATOR_ONNX_CONVERT_DEFINE(Add, Add, OpNameInfo())
 OPERATOR_ONNX_CONVERT_DEFINE(Mul, Mul, OpNameInfo())
 OPERATOR_ONNX_CONVERT_DEFINE(ReLU, Relu, OpNameInfo())
 
@@ -257,7 +257,7 @@ OPERATOR_ONNX_CONVERT_DEFINE(Sub, Sub, OpNameInfo())
 #define OP_CONVERT_FUNCTION_NAME(name) GetOpOnnxConvertInfo_##name
 
 void RegisterOpConverters(const std::function &fn) {
-  fn(OP_CONVERT_FUNCTION_NAME(TensorAdd)());
+  fn(OP_CONVERT_FUNCTION_NAME(Add)());
   fn(OP_CONVERT_FUNCTION_NAME(Mul)());
   fn(OP_CONVERT_FUNCTION_NAME(ReLU)());
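With the converter entry renamed, a graph containing Add still maps onto onnx::Add during export. A minimal export sketch, assuming the serialization API of this release; the file name and shapes are illustrative:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.train.serialization import export

    class AddNet(nn.Cell):
        def __init__(self):
            super(AddNet, self).__init__()
            self.add = P.Add()

        def construct(self, x, y):
            return self.add(x, y)

    x = Tensor(np.ones((2, 3), np.float32))
    y = Tensor(np.ones((2, 3), np.float32))
    # The exporter now looks the op up as "Add" and emits onnx::Add.
    export(AddNet(), x, y, file_name="add_net.onnx", file_format="ONNX")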
""" - __quant_op_name__ = ["TensorAdd", "Sub", "Mul", "RealDiv"] + __quant_op_name__ = ["Add", "Sub", "Mul", "RealDiv"] def __init__(self, network, mean, std_dev, *inputs, is_mindir=False): network = Validator.check_isinstance('network', network, (nn.Cell,)) @@ -225,7 +225,7 @@ class ExportManualQuantNetwork(ExportToQuantInferNetwork): Returns: Cell, Infer network. """ - __quant_op_name__ = ["TensorAdd", "Sub", "Mul", "RealDiv"] + __quant_op_name__ = ["Add", "Sub", "Mul", "RealDiv"] def __init__(self, network, mean, std_dev, *inputs, is_mindir=False): super(ExportManualQuantNetwork, self).__init__(network, mean, std_dev, *inputs, is_mindir) diff --git a/mindspore/compression/quant/qat.py b/mindspore/compression/quant/qat.py index f95c43be8f..c2607fcece 100644 --- a/mindspore/compression/quant/qat.py +++ b/mindspore/compression/quant/qat.py @@ -173,7 +173,7 @@ class QuantizationAwareTraining(Quantizer): >>> quantizer = QuantizationAwareTraining(bn_fold=False, per_channel=[True, False], symmetric=[True, False]) >>> net_qat = quantizer.quantize(net) """ - __quant_op_name__ = ["TensorAdd", "Sub", "Mul", "RealDiv"] + __quant_op_name__ = ["Add", "Sub", "Mul", "RealDiv"] def __init__(self, bn_fold=True, diff --git a/mindspore/core/abstract/infer_functions.h b/mindspore/core/abstract/infer_functions.h index a84a0d85a4..45ac7e988c 100644 --- a/mindspore/core/abstract/infer_functions.h +++ b/mindspore/core/abstract/infer_functions.h @@ -93,8 +93,8 @@ AbstractBasePtr InferImplMinOrMaxGrad(const AnalysisEnginePtr &, const Primitive AbstractBasePtr InferImplMul(const AnalysisEnginePtr &, const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list); -AbstractBasePtr InferImplTensorAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive, - const AbstractBasePtrList &args_spec_list); +AbstractBasePtr InferImplAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplSquare(const AnalysisEnginePtr &, const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list); diff --git a/mindspore/core/abstract/prim_maths.cc b/mindspore/core/abstract/prim_maths.cc index d1cd002cfb..fa3e9f20bd 100644 --- a/mindspore/core/abstract/prim_maths.cc +++ b/mindspore/core/abstract/prim_maths.cc @@ -60,8 +60,8 @@ AbstractBasePtr InferImplSqrtGrad(const AnalysisEnginePtr &, const PrimitivePtr return out->Broaden(); } -AbstractBasePtr InferImplTensorAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive, - const AbstractBasePtrList &args_spec_list) { +AbstractBasePtr InferImplAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list) { // Inputs: two tensors. 
diff --git a/mindspore/core/abstract/infer_functions.h b/mindspore/core/abstract/infer_functions.h
index a84a0d85a4..45ac7e988c 100644
--- a/mindspore/core/abstract/infer_functions.h
+++ b/mindspore/core/abstract/infer_functions.h
@@ -93,8 +93,8 @@ AbstractBasePtr InferImplMinOrMaxGrad(const AnalysisEnginePtr &, const Primitive
 AbstractBasePtr InferImplMul(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                              const AbstractBasePtrList &args_spec_list);
-AbstractBasePtr InferImplTensorAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list);
+AbstractBasePtr InferImplAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                             const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplSquare(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                 const AbstractBasePtrList &args_spec_list);
diff --git a/mindspore/core/abstract/prim_maths.cc b/mindspore/core/abstract/prim_maths.cc
index d1cd002cfb..fa3e9f20bd 100644
--- a/mindspore/core/abstract/prim_maths.cc
+++ b/mindspore/core/abstract/prim_maths.cc
@@ -60,8 +60,8 @@ AbstractBasePtr InferImplSqrtGrad(const AnalysisEnginePtr &, const PrimitivePtr
   return out->Broaden();
 }
 
-AbstractBasePtr InferImplTensorAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list) {
+AbstractBasePtr InferImplAdd(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                             const AbstractBasePtrList &args_spec_list) {
   // Inputs: two tensors.
   const std::string op_name = primitive->name();
   CheckArgsSize(op_name, args_spec_list, 2);
diff --git a/mindspore/core/abstract/primitive_infer_map.cc b/mindspore/core/abstract/primitive_infer_map.cc
index b7d8d8bc38..ec970d6d9c 100644
--- a/mindspore/core/abstract/primitive_infer_map.cc
+++ b/mindspore/core/abstract/primitive_infer_map.cc
@@ -38,7 +38,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimMaximumGrad, {InferImplMinOrMaxGrad, true}},
     {prim::kPrimMinimumGrad, {InferImplMinOrMaxGrad, true}},
     {prim::kPrimMul, {InferImplMul, true}},
-    {prim::kPrimTensorAdd, {InferImplTensorAdd, true}},
+    {prim::kPrimAdd, {InferImplAdd, true}},
     {prim::kPrimSquare, {InferImplSquare, true}},
     {prim::kPrimSqrt, {InferImplSqrt, true}},
     {prim::kPrimSqrtGrad, {InferImplSqrtGrad, true}},
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index cfaac47362..21d5c50c4a 100644
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -237,7 +237,7 @@ inline const PrimitivePtr kPrimSparseTensorGetIndices = std::make_shared("SparseTensorGetDenseShape");
 
 // Maths
-inline const PrimitivePtr kPrimTensorAdd = std::make_shared("TensorAdd");
+inline const PrimitivePtr kPrimAdd = std::make_shared("Add");
 inline const PrimitivePtr kPrimMatMul = std::make_shared("MatMul");
 inline const PrimitivePtr kPrimBatchMatMul = std::make_shared("BatchMatMul");
 inline const PrimitivePtr kPrimMaximumGrad = std::make_shared("MaximumGrad");
diff --git a/mindspore/core/c_ops/add.cc b/mindspore/core/c_ops/add.cc
index e3b3eca615..bb0c06723e 100644
--- a/mindspore/core/c_ops/add.cc
+++ b/mindspore/core/c_ops/add.cc
@@ -49,6 +49,6 @@ AbstractBasePtr AddInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr
   return std::make_shared(InferType(primitive, input_args),
                           InferShape(primitive, input_args)->shape());
 }
-REGISTER_PRIMITIVE_EVAL_IMPL(Add, prim::kPrimTensorAdd, AddInfer);
+REGISTER_PRIMITIVE_EVAL_IMPL(Add, prim::kPrimAdd, AddInfer);
 REGISTER_PRIMITIVE_C(kNameAdd, Add);
 }  // namespace mindspore
diff --git a/mindspore/core/ir/pattern_matcher.h b/mindspore/core/ir/pattern_matcher.h
index 778ab5185e..af9987e79b 100644
--- a/mindspore/core/ir/pattern_matcher.h
+++ b/mindspore/core/ir/pattern_matcher.h
@@ -989,7 +989,7 @@ class PConstant : public PBase > {
 }
 
 // Arithmetic operations
-BIN_OPERATION_PATTERN(operator+, prim::kPrimTensorAdd, true);
+BIN_OPERATION_PATTERN(operator+, prim::kPrimAdd, true);
 BIN_OPERATION_PATTERN(operator*, prim::kPrimMul, true);
 BIN_OPERATION_PATTERN(operator/, prim::kPrimRealDiv, false);
 BIN_OPERATION_PATTERN(operator-, prim::kPrimSub, false);
diff --git a/mindspore/nn/_graph_kernels/graph_kernels.py b/mindspore/nn/_graph_kernels/graph_kernels.py
index b314476878..869a67bd59 100644
--- a/mindspore/nn/_graph_kernels/graph_kernels.py
+++ b/mindspore/nn/_graph_kernels/graph_kernels.py
@@ -225,7 +225,7 @@ class LambNextMV(GraphKernel):
     def __init__(self):
         super(LambNextMV, self).__init__()
         self.mul = P.Mul()
-        self.add = P.TensorAdd()
+        self.add = P.Add()
         self.div = P.RealDiv()
         self.sqrt = P.Sqrt()
         self.rsqrt = P.Rsqrt()
diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index 5009006666..7bf0e08302 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -651,7 +651,7 @@ class LogSigmoid(Cell):
         super(LogSigmoid, self).__init__()
         self.mul = P.Mul()
         self.exp = P.Exp()
-        self.add = P.TensorAdd()
+        self.add = P.Add()
         self.rec = P.Reciprocal()
         self.log = P.Log()
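LogSigmoid is one of several nn cells that now hold a P.Add instance; its construct composes log(1 / (1 + exp(-x))) from exactly the primitives initialized here. A sketch of that composition (my arrangement of the ops, not a copy of the cell body):

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    mul, exp, add, rec, log = P.Mul(), P.Exp(), P.Add(), P.Reciprocal(), P.Log()
    x = Tensor(np.array([-1.0, 0.0, 2.0], np.float32))

    # log(1 / (1 + exp(-x))), assembled from the same primitives LogSigmoid holds.
    print(log(rec(add(exp(mul(x, -1.0)), 1.0))))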
a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py index ba3c1e7c66..64d639c7f6 100755 --- a/mindspore/nn/layer/embedding.py +++ b/mindspore/nn/layer/embedding.py @@ -441,13 +441,13 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup): self.mul = P.Mul() self.inf_mask_mul = P.Mul() - self.bias_add = P.TensorAdd() - self.inf_add = P.TensorAdd() + self.bias_add = P.Add() + self.inf_add = P.Add() self.merge_op = None self.count_op = P.UnsortedSegmentSum() self.abs = P.Abs() self.equal = P.Equal() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.div_no_nan = P.DivNoNan() self.expand = P.ExpandDims() diff --git a/mindspore/nn/layer/quant.py b/mindspore/nn/layer/quant.py index 219f5ede83..62787a9a91 100644 --- a/mindspore/nn/layer/quant.py +++ b/mindspore/nn/layer/quant.py @@ -99,8 +99,8 @@ class BatchNormFoldCell(Cell): else: batch_mean = P.ZerosLike()(variance) batch_std = P.OnesLike()(variance) - running_mean = P.TensorAdd()(mean, 0.) - running_std = P.Sqrt()(P.TensorAdd()(variance, self.epsilon)) + running_mean = P.Add()(mean, 0.) + running_std = P.Sqrt()(P.Add()(variance, self.epsilon)) return batch_mean, batch_std, running_mean, running_std @@ -559,7 +559,7 @@ class Conv2dBnFoldQuantOneConv(Cell): return s def construct(self, x): - running_std = P.Sqrt()(P.TensorAdd()(self.moving_variance, self.eps)) + running_std = P.Sqrt()(P.Add()(self.moving_variance, self.eps)) scale_factor = self.gamma / running_std if self.channel_axis: scale_factor = self.reshape(scale_factor, (1, -1, 1, 1)) @@ -1236,7 +1236,7 @@ class TensorAddQuant(Cell): ema=True, ema_decay=ema_decay, quant_dtype=quant_dtype) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x1, x2): x = self.add(x1, x2) diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index 75ce1d5e24..e6be460752 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ b/mindspore/ops/_grad/grad_math_ops.py @@ -155,9 +155,9 @@ def bprop_batchmatmul(self): return bprop -@bprop_getters.register(P.TensorAdd) +@bprop_getters.register(P.Add) def get_bprop_tensor_add(self): - """Grad definition for `TensorAdd` operation.""" + """Grad definition for `Add` operation.""" def bprop(x, y, out, dout): return binop_grad_common(x, y, dout, dout) diff --git a/mindspore/ops/_op_impl/akg/ascend/add.py b/mindspore/ops/_op_impl/akg/ascend/add.py index d8689eed6d..aa0e0ea762 100644 --- a/mindspore/ops/_op_impl/akg/ascend/add.py +++ b/mindspore/ops/_op_impl/akg/ascend/add.py @@ -13,10 +13,10 @@ # limitations under the License. # ============================================================================ -"""TensorAdd op""" +"""Add op""" from mindspore.ops.op_info_register import op_info_register, AkgAscendRegOp, DataType as DT -op_info = AkgAscendRegOp("TensorAdd") \ +op_info = AkgAscendRegOp("Add") \ .fusion_type("ELEMWISE") \ .input(0, "x") \ .input(1, "y") \ @@ -38,5 +38,5 @@ op_info = AkgAscendRegOp("TensorAdd") \ @op_info_register(op_info) def _add_akg(): - """TensorAdd Akg register""" + """Add Akg register""" return diff --git a/mindspore/ops/_op_impl/tbe/tensor_add.py b/mindspore/ops/_op_impl/tbe/tensor_add.py index a1f21bee77..f4bc26c62c 100644 --- a/mindspore/ops/_op_impl/tbe/tensor_add.py +++ b/mindspore/ops/_op_impl/tbe/tensor_add.py @@ -13,10 +13,10 @@ # limitations under the License. 
# ============================================================================ -"""TensorAdd op""" +"""Add op""" from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType -tensor_add_op_info = TBERegOp("TensorAdd") \ +tensor_add_op_info = TBERegOp("Add") \ .fusion_type("ELEMWISE") \ .async_flag(False) \ .binfile_name("add.so") \ diff --git a/mindspore/ops/_op_impl/tbe/tensor_add_ds.py b/mindspore/ops/_op_impl/tbe/tensor_add_ds.py index 3a39acaa7c..604bb3c196 100644 --- a/mindspore/ops/_op_impl/tbe/tensor_add_ds.py +++ b/mindspore/ops/_op_impl/tbe/tensor_add_ds.py @@ -16,7 +16,7 @@ """TensorAdd op""" from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType -tensor_add_op_info = TBERegOp("TensorAdd") \ +tensor_add_op_info = TBERegOp("Add") \ .fusion_type("ELEMWISE") \ .async_flag(False) \ .binfile_name("add.so") \ diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py index 3ceb69fac0..79bf49a59b 100644 --- a/mindspore/ops/composite/base.py +++ b/mindspore/ops/composite/base.py @@ -395,7 +395,7 @@ class MultitypeFuncGraph(MultitypeFuncGraph_): >>> from mindspore.ops import Primitive, operations as P >>> from mindspore import dtype as mstype >>> - >>> tensor_add = P.TensorAdd() + >>> tensor_add = P.Add() >>> add = MultitypeFuncGraph('add') >>> @add.register("Number", "Number") ... def add_scala(x, y): diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py index 670af6fab0..792431b286 100644 --- a/mindspore/ops/functional.py +++ b/mindspore/ops/functional.py @@ -47,7 +47,7 @@ merge = P.Merge() geswitch = P.GeSwitch() addn = P.AddN() absolute = P.Abs() -tensor_add = P.TensorAdd() +tensor_add = P.Add() neg_tensor = P.Neg() tensor_lt = P.Less() tensor_le = P.LessEqual() diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 337c39acdb..4c2f76aab1 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -54,7 +54,7 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus, Reciprocal, CumSum, HistogramFixedWidth, SquaredDifference, Xdivy, Xlogy, Sin, Sqrt, Rsqrt, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, - Square, Sub, TensorAdd, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan, + Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan, MatrixInverse) from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, UniformInt, UniformReal, @@ -102,6 +102,7 @@ __all__ = [ 'Sort', 'EditDistance', 'CropAndResize', + 'Add', 'TensorAdd', 'Argmax', 'Argmin', diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index a300a5411d..182b6059a1 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -25,6 +25,7 @@ import numbers import numpy as np +from mindspore import log as logger from .._utils import get_concat_offset from ..operations.math_ops import _infer_shape_reduce from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index 336c1f42b0..b67669f588 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -106,7 +106,7 @@ class GeSwitch(PrimitiveWithInfer): ... def __init__(self): ... super(Net, self).__init__() ... 
self.square = ops.Square() - ... self.add = ops.TensorAdd() + ... self.add = ops.Add() ... self.value = Tensor(np.full((1), 3), mindspore.float32) ... self.switch = ops.GeSwitch() ... self.merge = ops.Merge() diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index baee3da117..5bf6bfc13a 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -66,7 +66,7 @@ class ScalarSummary(PrimitiveWithInfer): ... def __init__(self,): ... super(SummaryDemo, self).__init__() ... self.summary = ops.ScalarSummary() - ... self.add = ops.TensorAdd() + ... self.add = ops.Add() ... ... def construct(self, x, y): ... name = "x" @@ -149,7 +149,7 @@ class TensorSummary(PrimitiveWithInfer): ... def __init__(self,): ... super(SummaryDemo, self).__init__() ... self.summary = ops.TensorSummary() - ... self.add = ops.TensorAdd() + ... self.add = ops.Add() ... ... def construct(self, x, y): ... x = self.add(x, y) @@ -191,7 +191,7 @@ class HistogramSummary(PrimitiveWithInfer): ... def __init__(self,): ... super(SummaryDemo, self).__init__() ... self.summary = ops.HistogramSummary() - ... self.add = ops.TensorAdd() + ... self.add = ops.Add() ... ... def construct(self, x, y): ... x = self.add(x, y) @@ -409,7 +409,7 @@ class Assert(PrimitiveWithInfer): ... def __init__(self): ... super(AssertDemo, self).__init__() ... self.assert1 = ops.Assert(summarize=10) - ... self.add = ops.TensorAdd() + ... self.add = ops.Add() ... ... def construct(self, x, y): ... data = self.add(x, y) diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 502e347f9e..f281282677 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -18,6 +18,7 @@ import copy import numpy as np +from mindspore import log as logger from ... import context from .. import signature as sig from ..._checkparam import Validator as validator @@ -114,7 +115,7 @@ class _BitwiseBinaryOp(_MathBinaryOp): return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name) -class TensorAdd(_MathBinaryOp): +class Add(_MathBinaryOp): r""" Adds two input tensors element-wise. @@ -143,7 +144,7 @@ class TensorAdd(_MathBinaryOp): ``Ascend`` ``GPU`` ``CPU`` Examples: - >>> add = ops.TensorAdd() + >>> add = ops.Add() >>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> input_y = Tensor(np.array([4, 5, 6]).astype(np.float32)) >>> output = add(input_x, input_y) @@ -160,6 +161,10 @@ class TensorAdd(_MathBinaryOp): return Tensor(out) return None +def TensorAdd(): + """Deprecated. TensorAdd will be removed in a future release; use Add instead.""" + logger.warning("WARN_DEPRECATED: The usage of TensorAdd is deprecated. 
Please use Add.") + return Add() class AssignAdd(PrimitiveWithInfer): """ diff --git a/model_zoo/official/cv/centerface/src/mobile_v2.py b/model_zoo/official/cv/centerface/src/mobile_v2.py index 2b84a002a3..367225c727 100644 --- a/model_zoo/official/cv/centerface/src/mobile_v2.py +++ b/model_zoo/official/cv/centerface/src/mobile_v2.py @@ -16,7 +16,7 @@ import mindspore.nn as nn from mindspore.ops import operations as P -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from src.var_init import KaimingNormal @@ -91,7 +91,7 @@ class InvertedResidual(nn.Cell): ]) self.conv = nn.SequentialCell(layers) - self.add = TensorAdd() + self.add = Add() self.cast = P.Cast() def construct(self, x): diff --git a/model_zoo/official/cv/cnnctc/src/cnn_ctc.py b/model_zoo/official/cv/cnnctc/src/cnn_ctc.py index cd8a2f63ef..c9f76a54ca 100644 --- a/model_zoo/official/cv/cnnctc/src/cnn_ctc.py +++ b/model_zoo/official/cv/cnnctc/src/cnn_ctc.py @@ -198,7 +198,7 @@ class BasicBlock(nn.Cell): self.bn2 = ms_fused_bn(planes) self.relu = P.ReLU() self.downsample = downsample - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): residual = x diff --git a/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py b/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py index 2895a34309..354ec9a133 100644 --- a/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py +++ b/model_zoo/official/cv/deeplabv3/src/nets/deeplab_v3/deeplab_v3.py @@ -102,7 +102,7 @@ class Bottleneck(nn.Cell): self.relu = nn.ReLU() self.downsample = downsample - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/resnet50.py b/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/resnet50.py index cf8f8e8387..c5ac05c89b 100644 --- a/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/resnet50.py +++ b/model_zoo/official/cv/faster_rcnn/src/FasterRcnn/resnet50.py @@ -222,7 +222,7 @@ class ResidualBlockUsing(nn.Cell): self.bn_down_sample = self.bn_down_sample.set_train() if not weights_update: self.conv_down_sample.weight.requires_grad = False - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/maskrcnn/src/maskrcnn/resnet50.py b/model_zoo/official/cv/maskrcnn/src/maskrcnn/resnet50.py index 32be5478ff..26f520e850 100644 --- a/model_zoo/official/cv/maskrcnn/src/maskrcnn/resnet50.py +++ b/model_zoo/official/cv/maskrcnn/src/maskrcnn/resnet50.py @@ -218,7 +218,7 @@ class ResidualBlockUsing(nn.Cell): self.bn_down_sample = self.bn_down_sample.set_train() if not weights_update: self.conv_down_sample.weight.requires_grad = False - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py b/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py index 1ea94c64d0..de4bdd672b 100644 --- a/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py +++ b/model_zoo/official/cv/mobilenetv2/src/mobilenetV2.py @@ -16,7 +16,7 @@ import numpy as np import mindspore.nn as nn from mindspore.ops import operations as P -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore import Tensor __all__ = ['MobileNetV2', 'MobileNetV2Backbone', 'MobileNetV2Head', 'mobilenet_v2'] @@ -129,7 +129,7 @@ class InvertedResidual(nn.Cell): nn.BatchNorm2d(oup), ]) self.conv = nn.SequentialCell(layers) - self.add = TensorAdd() + 
self.add = Add() self.cast = P.Cast() def construct(self, x): diff --git a/model_zoo/official/cv/mobilenetv2/src/mobilenetV2_fusion.py b/model_zoo/official/cv/mobilenetv2/src/mobilenetV2_fusion.py index 8de440d1a2..542e210bd3 100644 --- a/model_zoo/official/cv/mobilenetv2/src/mobilenetV2_fusion.py +++ b/model_zoo/official/cv/mobilenetv2/src/mobilenetV2_fusion.py @@ -120,7 +120,7 @@ class InvertedResidual(nn.Cell): nn.Conv2dBnAct(hidden_dim, oup, kernel_size=1, stride=1, pad_mode='pad', padding=0, group=1, has_bn=True) ]) self.conv = nn.SequentialCell(layers) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): out = self.conv(x) diff --git a/model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2.py b/model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2.py index 50d5728321..e0816b014b 100644 --- a/model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2.py +++ b/model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2.py @@ -123,7 +123,7 @@ class InvertedResidual(nn.Cell): nn.Conv2dBnAct(hidden_dim, oup, kernel_size=1, stride=1, pad_mode='pad', padding=0, group=1, has_bn=True) ]) self.conv = nn.SequentialCell(layers) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): out = self.conv(x) diff --git a/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py b/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py index 0bf59b9fb1..f370168ba5 100644 --- a/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py +++ b/model_zoo/official/cv/mobilenetv3/src/mobilenetV3.py @@ -197,7 +197,7 @@ class ResUnit(nn.Cell): padding=0, act_type=act_type, use_act=False) if num_in != num_out or stride != 1: self.use_short_cut_conv = False - self.add = P.TensorAdd() if self.use_short_cut_conv else None + self.add = P.Add() if self.use_short_cut_conv else None def construct(self, x): """construct""" diff --git a/model_zoo/official/cv/psenet/src/ETSNET/dice_loss.py b/model_zoo/official/cv/psenet/src/ETSNET/dice_loss.py index 4bfb27dd02..3616104a0c 100644 --- a/model_zoo/official/cv/psenet/src/ETSNET/dice_loss.py +++ b/model_zoo/official/cv/psenet/src/ETSNET/dice_loss.py @@ -49,7 +49,7 @@ class DiceLoss(_Loss): self.logical_or = P.LogicalOr() self.equal = P.Equal() self.zeros_like = P.ZerosLike() - self.add = P.TensorAdd() + self.add = P.Add() self.gather = P.Gather() def ohem_batch(self, scores, gt_texts, training_masks): diff --git a/model_zoo/official/cv/psenet/src/ETSNET/resnet50.py b/model_zoo/official/cv/psenet/src/ETSNET/resnet50.py index a12c4ac0ba..4067cac50b 100644 --- a/model_zoo/official/cv/psenet/src/ETSNET/resnet50.py +++ b/model_zoo/official/cv/psenet/src/ETSNET/resnet50.py @@ -61,7 +61,7 @@ class ResidualBlock(nn.Cell): kernel_size=1, stride=stride) self.bn_down_sample = _bn(out_channels, momentum=momentum) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/resnet/src/resnet.py b/model_zoo/official/cv/resnet/src/resnet.py index 73ef4b5cec..ae1a382f61 100755 --- a/model_zoo/official/cv/resnet/src/resnet.py +++ b/model_zoo/official/cv/resnet/src/resnet.py @@ -152,7 +152,7 @@ class ResidualBlock(nn.Cell): else: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride, use_se=self.use_se), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/resnet/src/resnet_gpu_benchmark.py b/model_zoo/official/cv/resnet/src/resnet_gpu_benchmark.py index f323a2682b..81fbe042a7 100644 --- 
a/model_zoo/official/cv/resnet/src/resnet_gpu_benchmark.py +++ b/model_zoo/official/cv/resnet/src/resnet_gpu_benchmark.py @@ -119,7 +119,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/resnet50_quant/models/resnet_quant.py b/model_zoo/official/cv/resnet50_quant/models/resnet_quant.py index 82bbac02c3..aaf84b0569 100755 --- a/model_zoo/official/cv/resnet50_quant/models/resnet_quant.py +++ b/model_zoo/official/cv/resnet50_quant/models/resnet_quant.py @@ -85,7 +85,7 @@ class ResidualBlock(nn.Cell): self.down_sample_layer = nn.Conv2dBnAct(in_channel, out_channel, kernel_size=1, stride=stride, pad_mode='same', padding=0, has_bn=True, activation='relu') - self.add = P.TensorAdd() + self.add = P.Add() self.relu = P.ReLU() def construct(self, x): diff --git a/model_zoo/official/cv/resnet_thor/src/resnet_thor.py b/model_zoo/official/cv/resnet_thor/src/resnet_thor.py index c8512ae9d1..985d8b0fd8 100644 --- a/model_zoo/official/cv/resnet_thor/src/resnet_thor.py +++ b/model_zoo/official/cv/resnet_thor/src/resnet_thor.py @@ -215,7 +215,7 @@ class ResidualBlock(nn.Cell): frequency=frequency, batch_size=batch_size), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/resnet_thor/src/thor_layer.py b/model_zoo/official/cv/resnet_thor/src/thor_layer.py index e6c828017d..96746e1fa9 100644 --- a/model_zoo/official/cv/resnet_thor/src/thor_layer.py +++ b/model_zoo/official/cv/resnet_thor/src/thor_layer.py @@ -333,7 +333,7 @@ class Dense_Thor_GPU(Cell): self.gather = P.Gather() self.freq = Tensor(frequency, mstype.int32) self.axis = 0 - self.add = P.TensorAdd() + self.add = P.Add() self.sqrt = P.Sqrt() self.cholesky = P.CholeskyTrsm(split_dim=split_dim) self.vector_matmul = P.BatchMatMul(transpose_a=True) @@ -690,7 +690,7 @@ class Dense_Thor(Cell): self.exp = P.Exp() self.dampingA = Tensor(np.identity(2048), mstype.float32) self.dampingG = Tensor(np.identity(1024), mstype.float32) - self.add = P.TensorAdd() + self.add = P.Add() self.sqrt = P.Sqrt() self.getG = P.InsertGradientOf(self.save_gradient) diff --git a/model_zoo/official/cv/resnext50/src/backbone/resnet.py b/model_zoo/official/cv/resnext50/src/backbone/resnet.py index 9c880154ea..c8d87aa037 100644 --- a/model_zoo/official/cv/resnext50/src/backbone/resnet.py +++ b/model_zoo/official/cv/resnext50/src/backbone/resnet.py @@ -16,7 +16,7 @@ ResNet based ResNext """ import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd, Split, Concat +from mindspore.ops.operations import Add, Split, Concat from mindspore.ops import operations as P from mindspore.common.initializer import TruncatedNormal @@ -105,7 +105,7 @@ class BasicBlock(nn.Cell): self.down_sample = down_sample self.down_sample_flag = True - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x @@ -176,7 +176,7 @@ class Bottleneck(nn.Cell): self.down_sample_flag = True self.cast = P.Cast() - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/retinaface_resnet50/src/network.py b/model_zoo/official/cv/retinaface_resnet50/src/network.py index f4f08fa4c3..429739fd55 100644 --- a/model_zoo/official/cv/retinaface_resnet50/src/network.py +++ 
b/model_zoo/official/cv/retinaface_resnet50/src/network.py @@ -95,7 +95,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/shufflenetv1/src/shufflenetv1.py b/model_zoo/official/cv/shufflenetv1/src/shufflenetv1.py index 427e07210a..d5066e9b5c 100644 --- a/model_zoo/official/cv/shufflenetv1/src/shufflenetv1.py +++ b/model_zoo/official/cv/shufflenetv1/src/shufflenetv1.py @@ -68,7 +68,7 @@ class ShuffleV1Block(nn.Cell): outputs = oup self.relu = nn.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() self.concat = P.Concat(1) self.shape = P.Shape() self.transpose = P.Transpose() diff --git a/model_zoo/official/cv/squeezenet/src/squeezenet.py b/model_zoo/official/cv/squeezenet/src/squeezenet.py index dd40d8d8a2..396e7de671 100644 --- a/model_zoo/official/cv/squeezenet/src/squeezenet.py +++ b/model_zoo/official/cv/squeezenet/src/squeezenet.py @@ -170,7 +170,7 @@ class SqueezeNet_Residual(nn.Cell): self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2) - self.add = P.TensorAdd() + self.add = P.Add() self.dropout = nn.Dropout(keep_prob=0.5) self.mean = P.ReduceMean(keep_dims=True) self.flatten = nn.Flatten() diff --git a/model_zoo/official/cv/ssd/src/ssd.py b/model_zoo/official/cv/ssd/src/ssd.py index ebaa9f6948..29fdafa053 100644 --- a/model_zoo/official/cv/ssd/src/ssd.py +++ b/model_zoo/official/cv/ssd/src/ssd.py @@ -133,7 +133,7 @@ class InvertedResidual(nn.Cell): _bn(oup), ]) self.conv = nn.SequentialCell(layers) - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.last_relu = last_relu self.relu = nn.ReLU6() diff --git a/model_zoo/official/cv/xception/src/Xception.py b/model_zoo/official/cv/xception/src/Xception.py index f9d26a0767..6aaba68689 100644 --- a/model_zoo/official/cv/xception/src/Xception.py +++ b/model_zoo/official/cv/xception/src/Xception.py @@ -68,7 +68,7 @@ class Block(nn.Cell): if strides != 1: rep.append(nn.MaxPool2d(3, strides, pad_mode="same")) self.rep = nn.SequentialCell(*rep) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, inp): x = self.rep(inp) diff --git a/model_zoo/official/cv/yolov3_darknet53/src/darknet.py b/model_zoo/official/cv/yolov3_darknet53/src/darknet.py index 7e2e04b1fd..16cdb90354 100644 --- a/model_zoo/official/cv/yolov3_darknet53/src/darknet.py +++ b/model_zoo/official/cv/yolov3_darknet53/src/darknet.py @@ -62,7 +62,7 @@ class ResidualBlock(nn.Cell): out_chls = out_channels//2 self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1) self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/src/darknet.py b/model_zoo/official/cv/yolov3_darknet53_quant/src/darknet.py index 0123868308..0cfcd43a68 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/src/darknet.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/src/darknet.py @@ -59,7 +59,7 @@ class ResidualBlock(nn.Cell): out_chls = out_channels//2 self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1) self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py 
b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py index 00ba9c080c..f1bfbe1455 100644 --- a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py +++ b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py @@ -107,7 +107,7 @@ class BasicBlock(nn.Cell): self.downsample = (in_channels != out_channels) if self.downsample: self.down_sample_layer = _conv2d(in_channels, out_channels, 1, stride=stride) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/model_zoo/official/cv/yolov4/src/cspdarknet53.py b/model_zoo/official/cv/yolov4/src/cspdarknet53.py index deecbe91cd..e58429c138 100644 --- a/model_zoo/official/cv/yolov4/src/cspdarknet53.py +++ b/model_zoo/official/cv/yolov4/src/cspdarknet53.py @@ -76,7 +76,7 @@ class ResidualBlock(nn.Cell): out_chls = out_channels self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1) self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -111,7 +111,7 @@ class CspDarkNet53(nn.Cell): self.outchannel = 1024 self.detect = detect self.concat = P.Concat(axis=1) - self.add = P.TensorAdd() + self.add = P.Add() self.conv0 = conv_block(3, 32, kernel_size=3, stride=1) self.conv1 = conv_block(32, 64, kernel_size=3, stride=2) diff --git a/model_zoo/official/nlp/bert/src/bert_model.py b/model_zoo/official/nlp/bert/src/bert_model.py index e3dab3f3e2..05dc6862c8 100644 --- a/model_zoo/official/nlp/bert/src/bert_model.py +++ b/model_zoo/official/nlp/bert/src/bert_model.py @@ -188,7 +188,7 @@ class EmbeddingPostprocessor(nn.Cell): use_one_hot=False) self.layernorm = nn.LayerNorm((embedding_size,)) self.position_ids = Tensor(np.arange(seq).reshape(-1, seq).astype(np.int32)) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, token_type_ids, word_embeddings): """Postprocessors apply positional and token type embeddings to word embeddings.""" @@ -226,7 +226,7 @@ class BertOutput(nn.Cell): weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) self.dropout = nn.Dropout(1 - dropout_prob) self.dropout_prob = dropout_prob - self.add = P.TensorAdd() + self.add = P.Add() self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type) self.cast = P.Cast() @@ -444,7 +444,7 @@ class BertAttention(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() if do_return_2d_tensor: diff --git a/model_zoo/official/nlp/bert_thor/src/bert_model.py b/model_zoo/official/nlp/bert_thor/src/bert_model.py index dadf16f7f6..9fbf7fe1b5 100644 --- a/model_zoo/official/nlp/bert_thor/src/bert_model.py +++ b/model_zoo/official/nlp/bert_thor/src/bert_model.py @@ -227,7 +227,7 @@ class EmbeddingPostprocessor(nn.Cell): frequency=frequency) self.position_ids = Tensor(np.arange(seq).reshape(-1, seq).astype(np.int32)) self.layernorm = nn.LayerNorm((embedding_size,)) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, token_type_ids, word_embeddings): """construct of EmbeddingPostprocessor""" @@ -275,7 +275,7 @@ class BertOutput(nn.Cell): batch_size=batch_size).to_float(compute_type) self.dropout = nn.Dropout(1 - dropout_prob) self.dropout_prob = dropout_prob - self.add = P.TensorAdd() + self.add = P.Add() self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type) self.cast = P.Cast() @@ -522,7 +522,7 @@ class BertAttention(nn.Cell): if 
self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() if do_return_2d_tensor: diff --git a/model_zoo/official/nlp/gnmt_v2/src/gnmt_model/beam_search.py b/model_zoo/official/nlp/gnmt_v2/src/gnmt_model/beam_search.py index 99a0ced031..92e6096296 100644 --- a/model_zoo/official/nlp/gnmt_v2/src/gnmt_model/beam_search.py +++ b/model_zoo/official/nlp/gnmt_v2/src/gnmt_model/beam_search.py @@ -35,7 +35,7 @@ class LengthPenalty(nn.Cell): def __init__(self, weight=1.0, compute_type=mstype.float32): super(LengthPenalty, self).__init__() self.weight = weight - self.add = P.TensorAdd() + self.add = P.Add() self.pow = P.Pow() self.div = P.RealDiv() self.five = Tensor(5.0, mstype.float32) @@ -188,7 +188,7 @@ class BeamSearchDecoder(nn.Cell): self.decoder = decoder self.is_using_while = is_using_while - self.add = P.TensorAdd() + self.add = P.Add() self.expand = P.ExpandDims() self.reshape = P.Reshape() self.shape_flat = (-1,) diff --git a/model_zoo/official/nlp/mass/src/transformer/beam_search.py b/model_zoo/official/nlp/mass/src/transformer/beam_search.py index dfd8b26b50..61d1599963 100644 --- a/model_zoo/official/nlp/mass/src/transformer/beam_search.py +++ b/model_zoo/official/nlp/mass/src/transformer/beam_search.py @@ -36,7 +36,7 @@ class LengthPenalty(nn.Cell): super(LengthPenalty, self).__init__() self.weight = weight - self.add = P.TensorAdd() + self.add = P.Add() self.pow = P.Pow() self.div = P.RealDiv() @@ -178,7 +178,7 @@ class BeamSearchDecoder(nn.Cell): self.decoder = decoder - self.add = P.TensorAdd() + self.add = P.Add() self.expand = P.ExpandDims() self.reshape = P.Reshape() self.shape_flat = (-1,) diff --git a/model_zoo/official/nlp/mass/src/transformer/multi_head_attention.py b/model_zoo/official/nlp/mass/src/transformer/multi_head_attention.py index d7db71b872..4871066d81 100644 --- a/model_zoo/official/nlp/mass/src/transformer/multi_head_attention.py +++ b/model_zoo/official/nlp/mass/src/transformer/multi_head_attention.py @@ -138,7 +138,7 @@ class MultiHeadAttention(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() diff --git a/model_zoo/official/nlp/mass/src/transformer/positional_embedding.py b/model_zoo/official/nlp/mass/src/transformer/positional_embedding.py index 7714e9f620..3d89429898 100644 --- a/model_zoo/official/nlp/mass/src/transformer/positional_embedding.py +++ b/model_zoo/official/nlp/mass/src/transformer/positional_embedding.py @@ -64,7 +64,7 @@ class PositionalEmbedding(nn.Cell): embedding_size, max_position_embeddings=512): super(PositionalEmbedding, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.expand_dims = P.ExpandDims() self.position_embedding_table = Tensor( position_encoding(max_position_embeddings, embedding_size), diff --git a/model_zoo/official/nlp/mass/src/transformer/residual_conn.py b/model_zoo/official/nlp/mass/src/transformer/residual_conn.py index ba88ce9966..4b2a8891a5 100644 --- a/model_zoo/official/nlp/mass/src/transformer/residual_conn.py +++ b/model_zoo/official/nlp/mass/src/transformer/residual_conn.py @@ -30,7 +30,7 @@ class ResidualConnection(nn.Cell): def __init__(self, dropout_prob=0.1): super(ResidualConnection, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.dropout = nn.Dropout(1.0 - dropout_prob) def construct(self, hidden_tensor, residual): 
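Taken together, the model_zoo changes above and below follow one mechanical pattern: every cell that constructed P.TensorAdd() (or imported TensorAdd from mindspore.ops.operations) now constructs P.Add(). A minimal sketch of what a caller sees after this patch; the Net cell here is illustrative, not taken from any file in the diff:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            # New canonical name. P.TensorAdd() still works through the shim
            # added in mindspore/ops/operations/math_ops.py, but logs
            # "WARN_DEPRECATED: The usage of TensorAdd is deprecated. Please use Add."
            self.add = P.Add()

        def construct(self, x, y):
            return self.add(x, y)

    net = Net()
    x = Tensor(np.array([1, 2, 3]).astype(np.float32))
    y = Tensor(np.array([4, 5, 6]).astype(np.float32))
    print(net(x, y))  # [5. 7. 9.], matching the Add docstring example
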
diff --git a/model_zoo/official/nlp/prophetnet/src/transformer/positional_embedding.py b/model_zoo/official/nlp/prophetnet/src/transformer/positional_embedding.py index 7714e9f620..3d89429898 100644 --- a/model_zoo/official/nlp/prophetnet/src/transformer/positional_embedding.py +++ b/model_zoo/official/nlp/prophetnet/src/transformer/positional_embedding.py @@ -64,7 +64,7 @@ class PositionalEmbedding(nn.Cell): embedding_size, max_position_embeddings=512): super(PositionalEmbedding, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.expand_dims = P.ExpandDims() self.position_embedding_table = Tensor( position_encoding(max_position_embeddings, embedding_size), diff --git a/model_zoo/official/nlp/prophetnet/src/transformer/residual_conn.py b/model_zoo/official/nlp/prophetnet/src/transformer/residual_conn.py index 9d75a9b0c2..5cf887f894 100644 --- a/model_zoo/official/nlp/prophetnet/src/transformer/residual_conn.py +++ b/model_zoo/official/nlp/prophetnet/src/transformer/residual_conn.py @@ -30,7 +30,7 @@ class ResidualConnection(nn.Cell): def __init__(self, dropout_prob=0.1): super(ResidualConnection, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.dropout = nn.Dropout(1 - dropout_prob) def construct(self, hidden_tensor, residual): diff --git a/model_zoo/official/nlp/tinybert/src/tinybert_model.py b/model_zoo/official/nlp/tinybert/src/tinybert_model.py index c36de53c95..3a3ef8f952 100644 --- a/model_zoo/official/nlp/tinybert/src/tinybert_model.py +++ b/model_zoo/official/nlp/tinybert/src/tinybert_model.py @@ -232,7 +232,7 @@ class BertOutput(nn.Cell): self.dense = nn.Dense(in_channels, out_channels, weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) self.dropout = nn.Dropout(1 - dropout_prob) - self.add = P.TensorAdd() + self.add = P.Add() self.is_gpu = context.get_context('device_target') == "GPU" if self.is_gpu: self.layernorm = nn.LayerNorm((out_channels,)).to_float(mstype.float32) @@ -451,7 +451,7 @@ class BertAttention(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() if do_return_2d_tensor: diff --git a/model_zoo/official/nlp/transformer/src/beam_search.py b/model_zoo/official/nlp/transformer/src/beam_search.py index 1157cb259a..e194f70428 100644 --- a/model_zoo/official/nlp/transformer/src/beam_search.py +++ b/model_zoo/official/nlp/transformer/src/beam_search.py @@ -35,7 +35,7 @@ class LengthPenalty(nn.Cell): compute_type=mstype.float32): super(LengthPenalty, self).__init__() self.weight = weight - self.add = P.TensorAdd() + self.add = P.Add() self.pow = P.Pow() self.div = P.RealDiv() self.cast = P.Cast() @@ -142,7 +142,7 @@ class BeamSearchDecoder(nn.Cell): self.max_decode_length = max_decode_length self.decoder = decoder - self.add = P.TensorAdd() + self.add = P.Add() self.expand = P.ExpandDims() self.reshape = P.Reshape() self.shape_flat = (-1,) diff --git a/model_zoo/official/nlp/transformer/src/transformer_model.py b/model_zoo/official/nlp/transformer/src/transformer_model.py index d98033199c..fcadd7ec2b 100644 --- a/model_zoo/official/nlp/transformer/src/transformer_model.py +++ b/model_zoo/official/nlp/transformer/src/transformer_model.py @@ -188,7 +188,7 @@ class EmbeddingPostprocessor(nn.Cell): super(EmbeddingPostprocessor, self).__init__() self.scores_mul = Tensor([math.sqrt(float(embedding_size))], dtype=mstype.float32) self.multiply = P.Mul() - self.add = P.TensorAdd() + self.add 
= P.Add() self.dropout = nn.Dropout(1 - dropout_prob, dtype=mstype.float32) self.use_dropout = dropout_prob > 0 self.expand_dims = P.ExpandDims() @@ -251,7 +251,7 @@ class LayerPostprocess(nn.Cell): def __init__(self, dropout_prob=0.1): super(LayerPostprocess, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.dropout = nn.Dropout(1 - dropout_prob) self.use_dropout = dropout_prob > 0 @@ -357,7 +357,7 @@ class MultiheadAttention(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() diff --git a/model_zoo/research/audio/deepspeech2/src/deepspeech2.py b/model_zoo/research/audio/deepspeech2/src/deepspeech2.py index 5851e03a41..bee25b51d5 100644 --- a/model_zoo/research/audio/deepspeech2/src/deepspeech2.py +++ b/model_zoo/research/audio/deepspeech2/src/deepspeech2.py @@ -207,7 +207,7 @@ class DeepSpeechModel(nn.Cell): self.reshape_op = P.Reshape() self.shape_op = P.Shape() self.transpose_op = P.Transpose() - self.add = P.TensorAdd() + self.add = P.Add() self.div = P.Div() sample_rate = self.audio_conf.sample_rate diff --git a/model_zoo/research/audio/wavenet/wavenet_vocoder/modules.py b/model_zoo/research/audio/wavenet/wavenet_vocoder/modules.py index 3f97f4a073..0e05895b62 100644 --- a/model_zoo/research/audio/wavenet/wavenet_vocoder/modules.py +++ b/model_zoo/research/audio/wavenet/wavenet_vocoder/modules.py @@ -75,7 +75,7 @@ class ResidualConv1dGLU(nn.Cell): self.tanh = P.Tanh() self.sigmoid = P.Sigmoid() self.mul = P.Mul() - self.add = P.TensorAdd() + self.add = P.Add() if skip_out_channels is None: skip_out_channels = residual_channels diff --git a/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18.py b/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18.py index 9a77806a3c..15ffc59441 100644 --- a/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18.py +++ b/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18.py @@ -14,7 +14,7 @@ # ============================================================================ """Face attribute resnet18 backbone.""" import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.nn import Cell @@ -41,7 +41,7 @@ class IRBlock(Cell): else: self.downsample = downsample - self.add = TensorAdd() + self.add = Add() self.cast = P.Cast() self.relu2 = P.ReLU() diff --git a/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py b/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py index f73a45ab73..8c0e68d940 100644 --- a/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py +++ b/model_zoo/research/cv/FaceAttribute/src/FaceAttribute/resnet18_softmax.py @@ -14,7 +14,7 @@ # ============================================================================ """Face attribute resnet18 backbone.""" import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.nn import Cell @@ -41,7 +41,7 @@ class IRBlock(Cell): else: self.downsample = downsample - self.add = TensorAdd() + self.add = Add() self.cast = P.Cast() self.relu2 = P.ReLU() diff --git a/model_zoo/research/cv/FaceQualityAssessment/src/face_qa.py b/model_zoo/research/cv/FaceQualityAssessment/src/face_qa.py index ac4d2fcd03..4406c79049 100644 --- 
a/model_zoo/research/cv/FaceQualityAssessment/src/face_qa.py +++ b/model_zoo/research/cv/FaceQualityAssessment/src/face_qa.py @@ -14,7 +14,7 @@ # ============================================================================ """Face Quality Assessment backbone.""" import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.nn import Dense, Cell @@ -78,7 +78,7 @@ class Block1(Cell): self.bk1_relu4 = P.ReLU() self.cast = P.Cast() - self.add = TensorAdd() + self.add = Add() def construct(self, x): '''construct''' @@ -129,7 +129,7 @@ class Block2(Cell): self.bk2_relu4 = P.ReLU() self.cast = P.Cast() - self.add = TensorAdd() + self.add = Add() def construct(self, x): '''construct''' diff --git a/model_zoo/research/cv/FaceRecognition/src/backbone/resnet.py b/model_zoo/research/cv/FaceRecognition/src/backbone/resnet.py index e71d66e55b..87d5f4d6c2 100644 --- a/model_zoo/research/cv/FaceRecognition/src/backbone/resnet.py +++ b/model_zoo/research/cv/FaceRecognition/src/backbone/resnet.py @@ -17,7 +17,7 @@ import math import numpy as np import mindspore.nn as nn from mindspore.nn import Cell -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.common.initializer import initializer from mindspore.common import dtype as mstype @@ -96,7 +96,7 @@ class IRBlock(Cell): self.use_se = use_se if use_se == 1: self.se = SEBlock(planes, act_type=act_type) - self.add = TensorAdd() + self.add = Add() self.cast = P.Cast() def construct(self, x): diff --git a/model_zoo/research/cv/FaceRecognitionForTracking/src/reid.py b/model_zoo/research/cv/FaceRecognitionForTracking/src/reid.py index 60c851b984..3495f2895e 100644 --- a/model_zoo/research/cv/FaceRecognitionForTracking/src/reid.py +++ b/model_zoo/research/cv/FaceRecognitionForTracking/src/reid.py @@ -16,7 +16,7 @@ import math import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.nn import Dense, Cell @@ -76,7 +76,7 @@ class BaseBlock(Cell): self.relu2 = P.ReLU() self.cast = P.Cast() - self.add = TensorAdd() + self.add = Add() def construct(self, x): '''Construct function.''' diff --git a/model_zoo/research/cv/FaceRecognitionForTracking/src/reid_for_export.py b/model_zoo/research/cv/FaceRecognitionForTracking/src/reid_for_export.py index adfb496fe4..517feb347a 100644 --- a/model_zoo/research/cv/FaceRecognitionForTracking/src/reid_for_export.py +++ b/model_zoo/research/cv/FaceRecognitionForTracking/src/reid_for_export.py @@ -16,7 +16,7 @@ import math import mindspore.nn as nn -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.nn import Dense, Cell @@ -77,7 +77,7 @@ class BaseBlock(Cell): self.relu2 = P.ReLU() self.cast = P.Cast() - self.add = TensorAdd() + self.add = Add() def construct(self, x): '''Construct function.''' diff --git a/model_zoo/research/cv/centernet/src/decode.py b/model_zoo/research/cv/centernet/src/decode.py index c41a644d31..5d4e248cac 100644 --- a/model_zoo/research/cv/centernet/src/decode.py +++ b/model_zoo/research/cv/centernet/src/decode.py @@ -330,7 +330,7 @@ class MultiPoseDecode(nn.Cell): self.trans_gather_feature = TransposeGatherFeature() 
self.expand_dims = ops.ExpandDims() self.reshape = ops.Reshape() - self.add = ops.TensorAdd() + self.add = ops.Add() self.dtype = ops.DType() self.cast = ops.Cast() self.thresh = 0.1 diff --git a/model_zoo/research/cv/ghostnet/src/ghostnet.py b/model_zoo/research/cv/ghostnet/src/ghostnet.py index 14e8613f83..c007118e2b 100644 --- a/model_zoo/research/cv/ghostnet/src/ghostnet.py +++ b/model_zoo/research/cv/ghostnet/src/ghostnet.py @@ -269,7 +269,7 @@ class GhostBottleneck(nn.Cell): ConvUnit(num_in, num_out, kernel_size=1, stride=1, padding=0, num_groups=1, use_act=False), ]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): r"""construct of ghostnet""" diff --git a/model_zoo/research/cv/ghostnet/src/ghostnet600.py b/model_zoo/research/cv/ghostnet/src/ghostnet600.py index 699e234e9f..c9ca17627f 100644 --- a/model_zoo/research/cv/ghostnet/src/ghostnet600.py +++ b/model_zoo/research/cv/ghostnet/src/ghostnet600.py @@ -269,7 +269,7 @@ class GhostBottleneck(nn.Cell): ConvUnit(num_in, num_out, kernel_size=1, stride=1, padding=0, num_groups=1, use_act=False), ]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): """construct""" diff --git a/model_zoo/research/cv/ghostnet_quant/src/ghostnet.py b/model_zoo/research/cv/ghostnet_quant/src/ghostnet.py index 1ae33d39d5..002c7db42a 100644 --- a/model_zoo/research/cv/ghostnet_quant/src/ghostnet.py +++ b/model_zoo/research/cv/ghostnet_quant/src/ghostnet.py @@ -270,7 +270,7 @@ class GhostBottleneck(nn.Cell): ConvUnit(num_in, num_out, kernel_size=1, stride=1, padding=0, num_groups=1, use_act=False), ]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): r"""construct of GhostNet BottleNeck""" diff --git a/model_zoo/research/cv/resnet50_adv_pruning/src/resnet_imgnet.py b/model_zoo/research/cv/resnet50_adv_pruning/src/resnet_imgnet.py index 3c086895c2..75f9e3f18f 100644 --- a/model_zoo/research/cv/resnet50_adv_pruning/src/resnet_imgnet.py +++ b/model_zoo/research/cv/resnet50_adv_pruning/src/resnet_imgnet.py @@ -109,7 +109,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() self.op = P.ScatterNd() self.transpose = P.Transpose() diff --git a/model_zoo/research/cv/squeezenet/src/squeezenet.py b/model_zoo/research/cv/squeezenet/src/squeezenet.py index dd40d8d8a2..396e7de671 100644 --- a/model_zoo/research/cv/squeezenet/src/squeezenet.py +++ b/model_zoo/research/cv/squeezenet/src/squeezenet.py @@ -170,7 +170,7 @@ class SqueezeNet_Residual(nn.Cell): self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2) - self.add = P.TensorAdd() + self.add = P.Add() self.dropout = nn.Dropout(keep_prob=0.5) self.mean = P.ReduceMean(keep_dims=True) self.flatten = nn.Flatten() diff --git a/model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py b/model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py index ccfbece691..a1a815b91b 100644 --- a/model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py +++ b/model_zoo/research/cv/ssd_ghostnet/src/ssd_ghostnet.py @@ -273,7 +273,7 @@ class GhostBottleneck(nn.Cell): ConvBNReLU(num_in, num_out, kernel_size=1, stride=1, groups=1, use_act=False), ]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): """construct""" @@ -345,7 +345,7 @@ class InvertedResidual(nn.Cell): _bn(oup), ]) self.conv = nn.SequentialCell(layers) - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() 
self.last_relu = last_relu self.relu = nn.ReLU6() diff --git a/model_zoo/research/nlp/ternarybert/src/tinybert_model.py b/model_zoo/research/nlp/ternarybert/src/tinybert_model.py index 9b7f4fecd7..afe06c7a97 100644 --- a/model_zoo/research/nlp/ternarybert/src/tinybert_model.py +++ b/model_zoo/research/nlp/ternarybert/src/tinybert_model.py @@ -439,7 +439,7 @@ class BertOutput(nn.Cell): self.dense = nn.Dense(in_channels, out_channels, weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) self.dropout = nn.Dropout(1 - dropout_prob) - self.add = P.TensorAdd() + self.add = P.Add() self.is_gpu = context.get_context('device_target') == "GPU" if self.is_gpu: self.layernorm = nn.LayerNorm((out_channels,)).to_float(mstype.float32) @@ -686,7 +686,7 @@ class BertAttention(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() if do_return_2d_tensor: diff --git a/tests/mindspore_test_framework/apps/bert_attention_submodules.py b/tests/mindspore_test_framework/apps/bert_attention_submodules.py index 83729d9e70..9b1382fbb9 100644 --- a/tests/mindspore_test_framework/apps/bert_attention_submodules.py +++ b/tests/mindspore_test_framework/apps/bert_attention_submodules.py @@ -173,7 +173,7 @@ class BertAttentionMask(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() @@ -204,7 +204,7 @@ class BertAttentionMaskBackward(nn.Cell): if self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() diff --git a/tests/mindspore_test_framework/pipeline/forward/compare_forward.py b/tests/mindspore_test_framework/pipeline/forward/compare_forward.py index 485af12c15..49c6050e69 100644 --- a/tests/mindspore_test_framework/pipeline/forward/compare_forward.py +++ b/tests/mindspore_test_framework/pipeline/forward/compare_forward.py @@ -35,8 +35,8 @@ The pipeline is suitable for configs in a case-by-case style. Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], 'desc_expect': { @@ -57,8 +57,8 @@ The pipeline is suitable for configs in a case-by-case style. 
Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], 'desc_expect': { @@ -83,7 +83,7 @@ Example: { 'id':'add', 'group':'op-test', - 'block':(P.TensorAdd(), {'reduce_output': False}), + 'block':(P.Add(), {'reduce_output': False}), } ], 'inputs': [ @@ -122,7 +122,7 @@ Example: { 'id':'add', 'group':'op-test', - 'block':(P.TensorAdd(), {'reduce_output': False}), + 'block':(P.Add(), {'reduce_output': False}), } ], 'inputs': [ diff --git a/tests/mindspore_test_framework/pipeline/forward/compile_forward.py b/tests/mindspore_test_framework/pipeline/forward/compile_forward.py index b88fda7e4e..d281524418 100644 --- a/tests/mindspore_test_framework/pipeline/forward/compile_forward.py +++ b/tests/mindspore_test_framework/pipeline/forward/compile_forward.py @@ -31,8 +31,8 @@ The pipeline is suitable for config in a case-by-case style. Example: Examples: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], }) @@ -49,8 +49,8 @@ The pipeline is suitable for config in a case-by-case style. Example: Examples: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], }) diff --git a/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py b/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py index 53483f3876..0a3bb31a6d 100644 --- a/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py +++ b/tests/mindspore_test_framework/pipeline/gradient/compare_gradient.py @@ -37,8 +37,8 @@ case-by-case style config. Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], 'desc_expect': { @@ -139,8 +139,8 @@ suitable for case-by-case style config. Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]] }) @@ -228,8 +228,8 @@ config in a case-by-case style. Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]], 'desc_expect': { diff --git a/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py b/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py index 2ce6160152..4ee52110be 100644 --- a/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py +++ b/tests/mindspore_test_framework/pipeline/gradient/compile_gradient.py @@ -28,8 +28,8 @@ Check if compiling gradient anf graph is ok. 
This pipeline is suitable for case- Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]] }) @@ -44,8 +44,8 @@ Check if compiling gradient ge graph is ok. This pipeline is suitable for case-b Example: verification_set = [ - ('TensorAdd', { - 'block': (P.TensorAdd(), {'reduce_output': False}), + ('Add', { + 'block': (P.Add(), {'reduce_output': False}), 'desc_inputs': [[1, 3, 3, 4], [1, 3, 3, 4]], 'desc_bprop': [[1, 3, 3, 4]] }) diff --git a/tests/perf_test/resnet_example.py b/tests/perf_test/resnet_example.py index 34413109de..c034e4c5a1 100644 --- a/tests/perf_test/resnet_example.py +++ b/tests/perf_test/resnet_example.py @@ -62,7 +62,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = nn.BatchNorm2d(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): """ diff --git a/tests/st/auto_parallel/optimizer_parallel.py b/tests/st/auto_parallel/optimizer_parallel.py index fdcdd3ef87..295365b9df 100644 --- a/tests/st/auto_parallel/optimizer_parallel.py +++ b/tests/st/auto_parallel/optimizer_parallel.py @@ -187,8 +187,8 @@ class OptimizerSemiAutoAndAutoParallel6Net(Cell): has_bias=True) self.relu = ReLU() self.sigmoid = P.Sigmoid() - self.add1 = P.TensorAdd() - self.add2 = P.TensorAdd() + self.add1 = P.Add() + self.add2 = P.Add() self.mul1 = P.Mul().add_prim_attr('primitive_target', 'CPU') self.mul2 = P.Mul() self.mul3 = P.Mul() diff --git a/tests/st/auto_parallel/parallel_strategy_search.py b/tests/st/auto_parallel/parallel_strategy_search.py index 561ec9ea4a..ffb1c80b1b 100644 --- a/tests/st/auto_parallel/parallel_strategy_search.py +++ b/tests/st/auto_parallel/parallel_strategy_search.py @@ -214,7 +214,7 @@ class ParallelStrategySearchNet(Cell): self.prelu = P.PReLU() self.matmul = P.MatMul(transpose_b=transpose_b) self.l2norm = P.L2Normalize(axis=(1 - axis)) - self.tensoradd = P.TensorAdd() + self.tensoradd = P.Add() self.strided_slice = P.StridedSlice() self.dense = Dense(in_channels=6, out_channels=num_class, diff --git a/tests/st/auto_parallel/resnet50_expand_loss.py b/tests/st/auto_parallel/resnet50_expand_loss.py index de78b90728..a98215794f 100644 --- a/tests/st/auto_parallel/resnet50_expand_loss.py +++ b/tests/st/auto_parallel/resnet50_expand_loss.py @@ -88,7 +88,7 @@ class BasicBlock(nn.Cell): padding=0), _fused_bn(out_channels, momentum=momentum)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -132,7 +132,7 @@ class ResidualBlock(nn.Cell): elif self.stride != 1: self.maxpool_down = nn.MaxPool2d(kernel_size=1, stride=2, pad_mode='same') - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/control/test_cont_grad.py b/tests/st/control/test_cont_grad.py index 140a572112..692ac62c30 100644 --- a/tests/st/control/test_cont_grad.py +++ b/tests/st/control/test_cont_grad.py @@ -790,7 +790,7 @@ def test_if_by_if_forward(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -826,7 +826,7 @@ def test_if_by_if_forward_control_tuple_switch(): class Branch3Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() 
self.mul = P.Mul() self.div = P.RealDiv() @@ -840,7 +840,7 @@ def test_if_by_if_forward_control_tuple_switch(): class Branch2Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -856,7 +856,7 @@ def test_if_by_if_forward_control_tuple_switch(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -887,7 +887,7 @@ def test_if_by_if_forward_control_inside_net(): class Branch3Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -903,7 +903,7 @@ def test_if_by_if_forward_control_inside_net(): class Branch2Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -919,7 +919,7 @@ def test_if_by_if_forward_control_inside_net(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -947,14 +947,14 @@ def test_if_by_if_forward_use_namespace(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() def construct(self, a, b, x): if a < b: - a = P.TensorAdd()(a, b) + a = P.Add()(a, b) else: a = P.Sub()(a, b) if a == x: @@ -962,9 +962,9 @@ def test_if_by_if_forward_use_namespace(): else: a = P.RealDiv()(a, b) if b == x: - b = P.TensorAdd()(a, b) + b = P.Add()(a, b) else: - b = P.TensorAdd()(a, x) + b = P.Add()(a, x) a = a * b out = a + b + x return out @@ -982,13 +982,13 @@ def test_if_by_if_forward_use_global_op(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() def construct(self, a, b, x): - add = P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() @@ -1021,7 +1021,7 @@ def test_for_with_if_by_if_forward(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() def construct(self, a, b, x): @@ -1048,7 +1048,7 @@ def test_for_with_if_by_if_forward_namespace(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -1056,7 +1056,7 @@ def test_for_with_if_by_if_forward_namespace(): def construct(self, a, b, x): for _ in range(0, 6): if a < b: - a = P.TensorAdd()(a, b) + a = P.Add()(a, b) else: b = P.Sub()(b, x) a = a * b @@ -1077,13 +1077,13 @@ def test_if_by_if_forward_const_branch_inner(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() def construct(self, a, b, x): - add = P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() @@ -1118,13 +1118,13 @@ def test_if_by_if_forward_all_const_branch(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() def construct(self, a, b, x): - add = 
P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() @@ -1160,7 +1160,7 @@ def test_if_const_grad(): class MyNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, *inputs): out = self.add(*inputs) @@ -1195,7 +1195,7 @@ def test_if_by_if_const_grad(): class MyNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, *inputs): out = self.add(*inputs) @@ -1234,7 +1234,7 @@ def test_while_const_grad(): class MyNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, *inputs): out = self.add(*inputs) @@ -1267,7 +1267,7 @@ def test_if_by_while_const_grad(): class MyNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, *inputs): out = self.add(*inputs) diff --git a/tests/st/control/test_if_by_if.py b/tests/st/control/test_if_by_if.py index 41c143c1d9..a1b77c8e52 100644 --- a/tests/st/control/test_if_by_if.py +++ b/tests/st/control/test_if_by_if.py @@ -17,7 +17,7 @@ def test_if_by_if_basic(): def __init__(self): super().__init__() self.mul = P.Mul() - self.add = P.TensorAdd() + self.add = P.Add() a = np.full((1,), 5, dtype=np.float32) self.a = Parameter(Tensor(a), name='a') b = np.full((1,), 4, dtype=np.float32) @@ -37,7 +37,7 @@ def test_if_by_if_basic(): context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") self.subnet = SubNet() self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() a = np.full((1,), 5, dtype=np.float32) self.a = Parameter(Tensor(a), name='a') b = np.full((1,), 4, dtype=np.float32) diff --git a/tests/st/cpp/model/test_tensor_add.cc b/tests/st/cpp/model/test_tensor_add.cc index fda2dba0e1..cb5a70b256 100644 --- a/tests/st/cpp/model/test_tensor_add.cc +++ b/tests/st/cpp/model/test_tensor_add.cc @@ -22,16 +22,16 @@ using namespace mindspore::api; -static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/tensor_add/tensor_add.mindir"; +static const char tensor_add_file[] = "/home/workspace/mindspore_dataset/mindir/add/add.mindir"; static const std::vector input_data_1 = {1, 2, 3, 4}; static const std::vector input_data_2 = {2, 3, 4, 5}; -class TestTensorAdd : public ST::Common { +class TestAdd : public ST::Common { public: - TestTensorAdd() {} + TestAdd() {} }; -TEST_F(TestTensorAdd, InferMindIR) { +TEST_F(TestAdd, InferMindIR) { ContextAutoSet(); auto graph = Serialization::LoadModel(tensor_add_file, ModelType::kMindIR); diff --git a/tests/st/dump/test_data_dump.py b/tests/st/dump/test_data_dump.py index 7f7a1b54b5..f3ff046c88 100644 --- a/tests/st/dump/test_data_dump.py +++ b/tests/st/dump/test_data_dump.py @@ -34,7 +34,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x_, y_): return self.add(x_, y_) diff --git a/tests/st/export/text_air.py b/tests/st/export/text_air.py index b0de52d2da..8ea281a22c 100644 --- a/tests/st/export/text_air.py +++ b/tests/st/export/text_air.py @@ -26,7 +26,7 @@ from mindspore.nn.layer.conv import Conv2d from mindspore.nn.layer.normalization import BatchNorm2d from mindspore.nn.layer.pooling import MaxPool2d from mindspore.ops import operations as P -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from 
mindspore.train.serialization import export context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -128,7 +128,7 @@ class ResidualBlock(Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x @@ -176,7 +176,7 @@ class ResidualBlockWithDown(Cell): self.conv_down_sample = conv1x1( in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x diff --git a/tests/st/fusion/test_add_relu_buffer_fusion.py b/tests/st/fusion/test_add_relu_buffer_fusion.py index 215e592735..a0a8d48de6 100644 --- a/tests/st/fusion/test_add_relu_buffer_fusion.py +++ b/tests/st/fusion/test_add_relu_buffer_fusion.py @@ -27,7 +27,7 @@ class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.softmax = P.Softmax(axis=1) - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.relu = P.ReLU() self.reduce_mean = P.ReduceMean() diff --git a/tests/st/fusion/test_conv_bn1_fusion.py b/tests/st/fusion/test_conv_bn1_fusion.py index 51d1fac71b..836ede2096 100644 --- a/tests/st/fusion/test_conv_bn1_fusion.py +++ b/tests/st/fusion/test_conv_bn1_fusion.py @@ -62,7 +62,7 @@ def test_conv_bn_add_relu_fusion(): self.conv1 = nn.Conv2d(input_channel, output_channel, kernel_size=1, stride=1, padding=0, has_bias=False, pad_mode="same") self.bn = nn.BatchNorm2d(output_channel, momentum=0.1, eps=0.0001) - self.add = P.TensorAdd() + self.add = P.Add() self.relu = P.ReLU() self.mean = P.ReduceMean(keep_dims=True) self.reshape = P.Reshape() diff --git a/tests/st/fusion/test_tbe_multi_inout_eltwise_fusion.py b/tests/st/fusion/test_tbe_multi_inout_eltwise_fusion.py index b770a867a9..b45099799f 100644 --- a/tests/st/fusion/test_tbe_multi_inout_eltwise_fusion.py +++ b/tests/st/fusion/test_tbe_multi_inout_eltwise_fusion.py @@ -25,7 +25,7 @@ context.set_context(mode=context.GRAPH_MODE, device_id=4, device_target="Ascend" class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.relu = P.ReLU() self.biasadd = P.BiasAdd() diff --git a/tests/st/graph_kernel/model/test_split.py b/tests/st/graph_kernel/model/test_split.py index 1e824e0ee0..78b11baeaa 100644 --- a/tests/st/graph_kernel/model/test_split.py +++ b/tests/st/graph_kernel/model/test_split.py @@ -61,7 +61,7 @@ def graph_1(): b = gb.emit("Abs", a, 'b') c = gb.emit("Abs", b, 'c') d = gb.emit("Abs", c, 'd') - gb.emit('TensorAdd', [b, d], 'e') + gb.emit('Add', [b, d], 'e') return gb.get()[0] @@ -74,7 +74,7 @@ def graph_2(): b = gb.emit("Abs", a, 'b') c = gb.emit("Abs", a, 'c') d = gb.emit("Abs", b, 'd') - e = gb.emit('TensorAdd', [c, d], 'e') + e = gb.emit('Add', [c, d], 'e') gb.emit("Abs", e, 'f') return gb.get()[0] @@ -88,7 +88,7 @@ def graph_3(): b = gb.emit("Abs", a0, 'b') c = gb.emit("Abs", a1, 'c') d = gb.emit("Abs", b, 'd') - e = gb.emit('TensorAdd', [c, d], 'e') + e = gb.emit('Add', [c, d], 'e') gb.emit("Abs", e, 'f') return gb.get()[0] @@ -103,10 +103,10 @@ def graph_4(): c = gb.emit("Abs", b, 'c') d = gb.emit("Abs", a1, 'd') e = gb.emit("Abs", d, 'e') - f = gb.emit('TensorAdd', [c, e], 'f') + f = gb.emit('Add', [c, e], 'f') gb.emit('Abs', f, 'g') h = gb.emit("Abs", d, 'h') - i = gb.emit('TensorAdd', [c, h], 'i') + i = gb.emit('Add', [c, h], 'i') gb.emit("Abs", i, 'j') return gb.get()[0] @@ -121,10 +121,10 @@ def graph_5(): a = 
gb.emit("Abs", a0, 'a') b = gb.emit("Abs", a1, 'b') c = gb.emit("Abs", b, 'c') - d = gb.emit('TensorAdd', [a, c], 'd') + d = gb.emit('Add', [a, c], 'd') gb.emit("Abs", d, 'e') f = gb.emit("Abs", a2, 'f') - g = gb.emit('TensorAdd', [c, f], 'g') + g = gb.emit('Add', [c, f], 'g') gb.emit("Abs", g, 'h') return gb.get()[0] @@ -202,7 +202,7 @@ def graph_pat_4(): # g0 = gb.emit("Abs", g0, 'g0') # g0 = gb.emit("Abs", g0, 'g0') g0 = gb.emit("Abs", g0, 'g0') - g1 = gb.emit('TensorAdd', [f, g0], 'g1') + g1 = gb.emit('Add', [f, g0], 'g1') g2 = gb.emit("Abs", g1, 'g2') g3 = gb.emit("Abs", g2, 'g3') g4 = gb.emit("Abs", g3, 'g4') @@ -232,7 +232,7 @@ def graph_pat_6(): a = gb.emit("Abs", a0, 'a') b = gb.emit("Abs", a, 'b') c = gb.emit("Abs", a, 'c') - gb.emit("TensorAdd", [b, c], 'd') + gb.emit("Add", [b, c], 'd') gb.emit("Abs", c, 'f') # broke dimond return gb.get()[0] @@ -263,7 +263,7 @@ def graph_pat_8(): b = gb.emit("Abs", a, 'b') #c = gb.emit("Abs", b, 'b') c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)}) - gb.emit("TensorAdd", [b, c], 'd') + gb.emit("Add", [b, c], 'd') return gb.get()[0] diff --git a/tests/st/mem_reuse/resnet.py b/tests/st/mem_reuse/resnet.py index 19843efa70..6d5ec78550 100644 --- a/tests/st/mem_reuse/resnet.py +++ b/tests/st/mem_reuse/resnet.py @@ -113,7 +113,7 @@ class ResidualBlock(nn.Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -160,7 +160,7 @@ class ResidualBlockWithDown(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/model_zoo_tests/yolov3/src/yolov3.py b/tests/st/model_zoo_tests/yolov3/src/yolov3.py index 1fecaba83e..7ddf3ae695 100644 --- a/tests/st/model_zoo_tests/yolov3/src/yolov3.py +++ b/tests/st/model_zoo_tests/yolov3/src/yolov3.py @@ -107,7 +107,7 @@ class BasicBlock(nn.Cell): self.downsample = (in_channels != out_channels) if self.downsample: self.down_sample_layer = _conv2d(in_channels, out_channels, 1, stride=stride) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/model_zoo_tests/yolov3_darknet53/src/darknet.py b/tests/st/model_zoo_tests/yolov3_darknet53/src/darknet.py index 4a2eb1de78..42b9d985e9 100644 --- a/tests/st/model_zoo_tests/yolov3_darknet53/src/darknet.py +++ b/tests/st/model_zoo_tests/yolov3_darknet53/src/darknet.py @@ -62,7 +62,7 @@ class ResidualBlock(nn.Cell): out_chls = out_channels//2 self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1) self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/networks/models/bert/src/bert_model.py b/tests/st/networks/models/bert/src/bert_model.py index 3d15627dcf..c420d3abf9 100644 --- a/tests/st/networks/models/bert/src/bert_model.py +++ b/tests/st/networks/models/bert/src/bert_model.py @@ -249,7 +249,7 @@ class BertOutput(nn.Cell): weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) self.dropout = nn.Dropout(1 - dropout_prob) self.dropout_prob = dropout_prob - self.add = P.TensorAdd() + self.add = P.Add() self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type) self.cast = P.Cast() @@ -474,7 +474,7 @@ class BertAttention(nn.Cell): if 
self.has_attention_mask: self.expand_dims = P.ExpandDims() self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.get_dtype = P.DType() if do_return_2d_tensor: diff --git a/tests/st/networks/models/deeplabv3/src/backbone/resnet_deeplab.py b/tests/st/networks/models/deeplabv3/src/backbone/resnet_deeplab.py index d9348de76d..7c2e459247 100644 --- a/tests/st/networks/models/deeplabv3/src/backbone/resnet_deeplab.py +++ b/tests/st/networks/models/deeplabv3/src/backbone/resnet_deeplab.py @@ -344,7 +344,7 @@ class BottleneckV1(nn.Cell): self.downsample = nn.SequentialCell([conv, bn]) else: self.downsample = Subsample(stride) - self.add = P.TensorAdd() + self.add = P.Add() self.relu = nn.ReLU() self.Reshape = P.Reshape() @@ -418,7 +418,7 @@ class BottleneckV2(nn.Cell): self.downsample = nn.SequentialCell([conv, bn]) else: self.downsample = Subsample(stride) - self.add = P.TensorAdd() + self.add = P.Add() self.relu = nn.ReLU() def construct(self, x): @@ -478,7 +478,7 @@ class BottleneckV3(nn.Cell): else: self.downsample = Subsample(stride) self.downsample = Subsample(stride) - self.add = P.TensorAdd() + self.add = P.Add() self.relu = nn.ReLU() def construct(self, x): diff --git a/tests/st/networks/models/resnet50/src/resnet.py b/tests/st/networks/models/resnet50/src/resnet.py index 001e1db0cf..163502a7e5 100755 --- a/tests/st/networks/models/resnet50/src/resnet.py +++ b/tests/st/networks/models/resnet50/src/resnet.py @@ -105,7 +105,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/networks/models/resnet50/src_thor/resnet.py b/tests/st/networks/models/resnet50/src_thor/resnet.py index d5c1df503a..e3066374ca 100644 --- a/tests/st/networks/models/resnet50/src_thor/resnet.py +++ b/tests/st/networks/models/resnet50/src_thor/resnet.py @@ -182,7 +182,7 @@ class ResidualBlock(nn.Cell): damping=damping, loss_scale=loss_scale, frequency=frequency), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/networks/models/resnet50/src_thor/thor_layer.py b/tests/st/networks/models/resnet50/src_thor/thor_layer.py index 2ffe1882bc..84098d7575 100644 --- a/tests/st/networks/models/resnet50/src_thor/thor_layer.py +++ b/tests/st/networks/models/resnet50/src_thor/thor_layer.py @@ -395,7 +395,7 @@ class Dense_Thor(Cell): self.exp = P.Exp() self.dampingA = Tensor(np.identity(2048), mstype.float32) self.dampingG = Tensor(np.identity(1024), mstype.float32) - self.add = P.TensorAdd() + self.add = P.Add() self.sqrt = P.Sqrt() self.getG = P.InsertGradientOf(self.save_gradient) diff --git a/tests/st/networks/models/resnetv1_5.py b/tests/st/networks/models/resnetv1_5.py index 93e4cad403..1a6b3ae250 100644 --- a/tests/st/networks/models/resnetv1_5.py +++ b/tests/st/networks/models/resnetv1_5.py @@ -109,7 +109,7 @@ class ResidualBlock(nn.Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -156,7 +156,7 @@ class ResidualBlockWithDown(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git 
a/tests/st/networks/test_gpu_resnet.py b/tests/st/networks/test_gpu_resnet.py index 8444bd55c4..4d59daa04f 100644 --- a/tests/st/networks/test_gpu_resnet.py +++ b/tests/st/networks/test_gpu_resnet.py @@ -33,7 +33,7 @@ from mindspore.nn.layer.normalization import BatchNorm2d from mindspore.nn.layer.pooling import MaxPool2d from mindspore.nn.optim import Momentum from mindspore.ops import operations as P -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add context.set_context(mode=context.GRAPH_MODE, device_target="GPU") @@ -134,7 +134,7 @@ class ResidualBlock(Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x @@ -182,7 +182,7 @@ class ResidualBlockWithDown(Cell): self.conv_down_sample = conv1x1( in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = TensorAdd() + self.add = Add() def construct(self, x): identity = x diff --git a/tests/st/ops/ascend/test_add.py b/tests/st/ops/ascend/test_add.py index 6a07bb879f..abebe68ca5 100644 --- a/tests/st/ops/ascend/test_add.py +++ b/tests/st/ops/ascend/test_add.py @@ -25,7 +25,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x_, y_): return self.add(x_, y_) diff --git a/tests/st/ops/ascend/test_maxpool_with_argmax.py b/tests/st/ops/ascend/test_maxpool_with_argmax.py index d369634b04..57d7232a68 100644 --- a/tests/st/ops/ascend/test_maxpool_with_argmax.py +++ b/tests/st/ops/ascend/test_maxpool_with_argmax.py @@ -31,7 +31,7 @@ class Net(nn.Cell): strides=2) self.x = Parameter(initializer( 'normal', [1, 64, 112, 112]), name='w') - self.add = P.TensorAdd() + self.add = P.Add() @ms_function def construct(self): diff --git a/tests/st/ops/ascend/test_tbe_ops/test_add.py b/tests/st/ops/ascend/test_tbe_ops/test_add.py index bdf03da943..5898195ce0 100644 --- a/tests/st/ops/ascend/test_tbe_ops/test_add.py +++ b/tests/st/ops/ascend/test_tbe_ops/test_add.py @@ -23,7 +23,7 @@ from mindspore.ops import operations as P class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x_, y_): return self.add(x_, y_) diff --git a/tests/st/ops/cpu/test_tensoradd.py b/tests/st/ops/cpu/test_tensoradd.py index ee6dfff67b..ddf8a70a5e 100644 --- a/tests/st/ops/cpu/test_tensoradd.py +++ b/tests/st/ops/cpu/test_tensoradd.py @@ -26,7 +26,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target='CPU') class TensorAdd(nn.Cell): def __init__(self): super(TensorAdd, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): res = self.add(x, y) diff --git a/tests/st/ops/gpu/test_cudnn_inplace_fusion.py b/tests/st/ops/gpu/test_cudnn_inplace_fusion.py index 5d74b5e23b..c04b78797f 100644 --- a/tests/st/ops/gpu/test_cudnn_inplace_fusion.py +++ b/tests/st/ops/gpu/test_cudnn_inplace_fusion.py @@ -32,7 +32,7 @@ class Conv2dBpropInputInplace(nn.Cell): self.w1 = Parameter(initializer(w1, w1.shape), name='w1') self.conv2d_2 = P.Conv2DBackpropInput(out_channel=256, kernel_size=1) self.w2 = Parameter(initializer(w2, w2.shape), name='w2') - self.add = P.TensorAdd() + self.add = P.Add() self.maxpool = P.MaxPool(kernel_size=3, strides=2, pad_mode='SAME') self.maxpool_grad = G.MaxPoolGrad(kernel_size=3, strides=2, 
pad_mode='SAME') self.shape = (32, 64, 56, 56) diff --git a/tests/st/ops/gpu/test_relu_v2.py b/tests/st/ops/gpu/test_relu_v2.py index cefc3007b5..ac279dc2d2 100644 --- a/tests/st/ops/gpu/test_relu_v2.py +++ b/tests/st/ops/gpu/test_relu_v2.py @@ -62,7 +62,7 @@ def test_ReluV2(): class AddReluNet(nn.Cell): def __init__(self): super(AddReluNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.relu = P.ReLU() self.relu_grad = G.ReluGrad() @@ -103,7 +103,7 @@ def test_AddRelu(): class AddReluGradNet(nn.Cell): def __init__(self): super(AddReluGradNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.relu = P.ReLU() self.relu_grad = G.ReluGrad() diff --git a/tests/st/ops/gpu/test_tensoradd.py b/tests/st/ops/gpu/test_tensoradd.py index c9edd33156..5836618b59 100644 --- a/tests/st/ops/gpu/test_tensoradd.py +++ b/tests/st/ops/gpu/test_tensoradd.py @@ -32,7 +32,7 @@ class TensroAdd(nn.Cell): def __init__(self): super(TensroAdd, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.x = Parameter(initializer( Tensor(np.random.randn(2, 0).astype(np.float32)), [2, 0]), name='x') @@ -135,7 +135,7 @@ class Tensoradd_d(nn.Cell): def __init__(self): super(Tensoradd_d, self).__init__() self.test_dynamic = inner.GpuConvertToDynamicShape() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): x = self.test_dynamic(x) diff --git a/tests/st/ops/graph_kernel/test_atomic_add.py b/tests/st/ops/graph_kernel/test_atomic_add.py index 6f3cd8c93c..b8786f46b1 100644 --- a/tests/st/ops/graph_kernel/test_atomic_add.py +++ b/tests/st/ops/graph_kernel/test_atomic_add.py @@ -35,7 +35,7 @@ class SumOutNet(Cell): class SingleOutNet(Cell): def __init__(self): super(SingleOutNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() self.sum = P.ReduceSum() @@ -48,7 +48,7 @@ class SingleOutNet(Cell): class MultiOutNet(Cell): def __init__(self): super(MultiOutNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() self.sum = P.ReduceSum() diff --git a/tests/st/ops/graph_kernel/test_cse.py b/tests/st/ops/graph_kernel/test_cse.py index ba838e8b50..3c64e092cc 100644 --- a/tests/st/ops/graph_kernel/test_cse.py +++ b/tests/st/ops/graph_kernel/test_cse.py @@ -24,7 +24,7 @@ import mindspore.ops.operations as P class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() def construct(self, x): diff --git a/tests/st/ops/graph_kernel/test_fuse.py b/tests/st/ops/graph_kernel/test_fuse.py index 2168ca2546..a317b2dc5c 100644 --- a/tests/st/ops/graph_kernel/test_fuse.py +++ b/tests/st/ops/graph_kernel/test_fuse.py @@ -25,7 +25,7 @@ from mindspore.ops.operations import _grad_ops as G class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.sqrt_grad = G.SqrtGrad() diff --git a/tests/st/ops/graph_kernel/test_simplify.py b/tests/st/ops/graph_kernel/test_simplify.py index 27004829cf..9742ac49db 100644 --- a/tests/st/ops/graph_kernel/test_simplify.py +++ b/tests/st/ops/graph_kernel/test_simplify.py @@ -24,7 +24,7 @@ import mindspore.ops.operations as P class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() diff --git a/tests/st/ps/multi_full_ps/resnet.py b/tests/st/ps/multi_full_ps/resnet.py index 0e21222d21..a7961c3e32 100755 
--- a/tests/st/ps/multi_full_ps/resnet.py +++ b/tests/st/ps/multi_full_ps/resnet.py @@ -105,7 +105,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/pynative/loss_scale/test_loss_scale.py b/tests/st/pynative/loss_scale/test_loss_scale.py index 6fdef1af71..a4074f9275 100644 --- a/tests/st/pynative/loss_scale/test_loss_scale.py +++ b/tests/st/pynative/loss_scale/test_loss_scale.py @@ -118,7 +118,7 @@ class NetFP16(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() def construct(self, x): diff --git a/tests/st/pynative/parser/test_parser_construct.py b/tests/st/pynative/parser/test_parser_construct.py index 5206518908..1ecd1e8699 100644 --- a/tests/st/pynative/parser/test_parser_construct.py +++ b/tests/st/pynative/parser/test_parser_construct.py @@ -77,7 +77,7 @@ def test_parser_construct(): @pytest.mark.env_onecard def test_sit_parser_input_parameter(): def tensor_add(x, y): - add = P.TensorAdd() + add = P.Add() z = add(x, y) return z x = Tensor(np.ones([2, 2]).astype(np.float32)) diff --git a/tests/st/pynative/test_graph_param_transform.py b/tests/st/pynative/test_graph_param_transform.py index 647d85cd85..d30bf32d10 100644 --- a/tests/st/pynative/test_graph_param_transform.py +++ b/tests/st/pynative/test_graph_param_transform.py @@ -128,7 +128,7 @@ def test_parser_switch_layer_inputs_tuple(): class Add(nn.Cell): def __init__(self): super().__init__() - self.op = P.TensorAdd() + self.op = P.Add() def construct(self, x): y = self.op(x[0], x[1]) diff --git a/tests/st/pynative/test_pynative_mixed_precision_cells.py b/tests/st/pynative/test_pynative_mixed_precision_cells.py index 7db1191312..f80312c1ec 100644 --- a/tests/st/pynative/test_pynative_mixed_precision_cells.py +++ b/tests/st/pynative/test_pynative_mixed_precision_cells.py @@ -48,7 +48,7 @@ class Add(Cell, MetaFactory): def __init__(self): super().__init__() MetaFactory.__init__(self) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): return self.add(x, y) diff --git a/tests/st/pynative/test_pynative_resnet50_ascend.py b/tests/st/pynative/test_pynative_resnet50_ascend.py index 69b8b2d581..783605cda9 100644 --- a/tests/st/pynative/test_pynative_resnet50_ascend.py +++ b/tests/st/pynative/test_pynative_resnet50_ascend.py @@ -116,7 +116,7 @@ class ResidualBlock(nn.Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -163,7 +163,7 @@ class ResidualBlockWithDown(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/pynative/test_pynative_resnet50_gpu.py b/tests/st/pynative/test_pynative_resnet50_gpu.py index 588b6842ea..45735e76d0 100644 --- a/tests/st/pynative/test_pynative_resnet50_gpu.py +++ b/tests/st/pynative/test_pynative_resnet50_gpu.py @@ -117,7 +117,7 @@ class ResidualBlock(nn.Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = 
P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -164,7 +164,7 @@ class ResidualBlockWithDown(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/st/quantization/mobilenetv2_quant/mobilenetV2.py b/tests/st/quantization/mobilenetv2_quant/mobilenetV2.py index 969dd6cfb1..bd176021b2 100644 --- a/tests/st/quantization/mobilenetv2_quant/mobilenetV2.py +++ b/tests/st/quantization/mobilenetv2_quant/mobilenetV2.py @@ -125,7 +125,7 @@ class InvertedResidual(nn.Cell): pad_mode='pad', padding=0, group=1, has_bn=True) ]) self.conv = nn.SequentialCell(layers) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): out = self.conv(x) diff --git a/tests/st/tbe_networks/resnet.py b/tests/st/tbe_networks/resnet.py index a977bbe15d..53328daed8 100644 --- a/tests/st/tbe_networks/resnet.py +++ b/tests/st/tbe_networks/resnet.py @@ -94,7 +94,7 @@ class ResidualBlock(nn.Cell): self.bn3 = bn_with_initialize_last(out_channels) self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x @@ -141,7 +141,7 @@ class ResidualBlockWithDown(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index 731b5caf28..0693dfccc0 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -26,9 +26,9 @@ namespace mindspore { namespace parallel { -class TensorAddInfo; -using TensorAddInfoPtr = std::shared_ptr<TensorAddInfo>; -TensorAddInfoPtr tensor_add, tensor_add1; +class AddInfo; +using AddInfoPtr = std::shared_ptr<AddInfo>; +AddInfoPtr tensor_add, tensor_add1; class TestTensorAddInfo : public UT::Common { public: @@ -58,11 +58,11 @@ void TestTensorAddInfo::SetUp() { Shapes inputs_shape = {{32, 64, 96}, {32, 64, 96}}; Shapes outputs_shape = {{32, 64, 96}}; - tensor_add = std::make_shared<TensorAddInfo>("tensoradd_info", inputs_shape, outputs_shape, attr); + tensor_add = std::make_shared<AddInfo>("tensoradd_info", inputs_shape, outputs_shape, attr); Shapes inputs_shape1 = {{1, 48}, {48, 1}}; Shapes outputs_shape1 = {{48, 48}}; - tensor_add1 = std::make_shared<TensorAddInfo>("tensoradd_info", inputs_shape1, outputs_shape1, attr); + tensor_add1 = std::make_shared<AddInfo>("tensoradd_info", inputs_shape1, outputs_shape1, attr); } TEST_F(TestTensorAddInfo, InferDevMatrixShape1) { diff --git a/tests/ut/cpp/parallel/step_parallel_test.cc b/tests/ut/cpp/parallel/step_parallel_test.cc index f1b02034e2..672b651c2c 100644 --- a/tests/ut/cpp/parallel/step_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_parallel_test.cc @@ -212,7 +212,7 @@ TEST_F(TestStepParallel, GetPythonPath1) { } TEST_F(TestStepParallel, GetPythonPath2) { - OperatorName operator_name = "TensorAdd"; + OperatorName operator_name = "Add"; const std::string expect = "mindspore.ops.operations"; auto temp = parallel::GetOpPythonPath(operator_name); ASSERT_EQ(temp, expect); } diff --git a/tests/ut/cpp/pre_activate/mem_reuse/mem_reuse_test.cc b/tests/ut/cpp/pre_activate/mem_reuse/mem_reuse_test.cc index 52035b9a7c..3a2ef3476f 100644 ---
a/tests/ut/cpp/pre_activate/mem_reuse/mem_reuse_test.cc +++ b/tests/ut/cpp/pre_activate/mem_reuse/mem_reuse_test.cc @@ -163,7 +163,7 @@ static KernelGraphPtr CreateGraphWithExecOrder() { EXPECT_NE(original_y_parameter, nullptr); original_y_parameter->set_name("original_y_parameter"); original_y_parameter->set_abstract(abstract); - std::vector add_inputs = {NewValueNode(prim::kPrimTensorAdd), original_x_parameter, original_y_parameter}; + std::vector add_inputs = {NewValueNode(prim::kPrimAdd), original_x_parameter, original_y_parameter}; auto original_add = anf_graph->NewCNode(add_inputs); EXPECT_NE(original_add, nullptr); original_add->set_abstract(abstract); @@ -198,7 +198,7 @@ static KernelGraphPtr CreateGraphWithExecOrder() { kernel_graph->SetExecOrderByDefault(); auto execution_order = kernel_graph->execution_order(); EXPECT_EQ(execution_order.size(), 2); - EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimAdd->name()); EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[1]), prim::kPrimMul->name()); auto new_outputs = kernel_graph->outputs(); EXPECT_EQ(new_outputs.size(), 1); diff --git a/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py b/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py index 8014d04b9b..730ec640e3 100644 --- a/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py +++ b/tests/ut/cpp/python_input/gtest_input/mem_reuse/mem_reuse_test.py @@ -14,7 +14,7 @@ # ============================================================================ from mindspore.ops import operations as P -add = P.TensorAdd() +add = P.Add() reshape = P.Reshape() diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py index 4d862465fc..b58a8107bf 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py @@ -920,7 +920,7 @@ def test_convert_switch_ops(tag): fns = FnDict() ge_switch = Primitive('GeSwitch') merge = Primitive('Merge') - add = Primitive('TensorAdd') + add = Primitive('Add') neg = Primitive('Neg') tuple_getitem = Primitive('tuple_getitem') make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py index 654b922c25..1834026dcc 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py @@ -16,7 +16,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P from mindspore.ops import functional as F -Add = P.TensorAdd() +Add = P.Add() Sub = P.Sub() Mul = P.Mul() RealDiv = P.RealDiv() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py index 96bbe7c057..5d8c30b946 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_with_decay_rule.py @@ -18,7 +18,7 @@ from mindspore.ops import operations as P from mindspore.ops import functional as F mul = P.Mul() -add = P.TensorAdd() +add = P.Add() square = P.Square() sqrt = P.Sqrt() real_div = P.RealDiv() diff --git 
a/tests/ut/cpp/python_input/gtest_input/pre_activate/buffer_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/buffer_fusion_test.py index bc20a0eb43..0321d84443 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/buffer_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/buffer_fusion_test.py @@ -31,7 +31,7 @@ Fusion_biasadd = Primitive('FusionOp_ReLU_ReLU_ReLU_BiasAdd_ReLU_ReLU_ReLU') Fusion_biasaddgrad = Primitive('FusionOp_ReLU_ReLU_ReLU_BiasAddGrad_ReLU_ReLU_ReLU') Fusion_matmul_relu = Primitive('FusionOp_MatMul_ReLU') -Add = P.TensorAdd() +Add = P.Add() Sub = P.Sub() make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_input_to_dynamic_input_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_input_to_dynamic_input_test.py index d06ac1b8bb..66f4d81a02 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_input_to_dynamic_input_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_input_to_dynamic_input_test.py @@ -20,7 +20,7 @@ from mindspore.ops import operations as P make_tuple = Primitive('make_tuple') concat = P.Concat() -add = P.TensorAdd() +add = P.Add() t1 = Tensor(np.random.randn(1, 11, 20, 1, 1).astype(np.float32)) t2 = Tensor(np.random.randn(1, 11, 20, 1, 1).astype(np.float32)) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py index b2338deead..b35466395a 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py @@ -18,7 +18,7 @@ from mindspore.ops import operations as P make_tuple = Primitive('make_tuple') tuple_get_item = Primitive("tuple_getitem") LSTM = P.LSTM(input_size=10, hidden_size=2, num_layers=1, has_bias=True, bidirectional=False, dropout=0.0) -add = P.TensorAdd() +add = P.Add() class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py index c8c5f1cc9b..989a27cbac 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/eliminate_redundant_op_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -add = P.TensorAdd() +add = P.Add() sub = P.Sub() make_tuple = Primitive('make_tuple') four2five = Primitive('Four2Five') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py index 444cf8282d..6f04a9a197 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py @@ -22,7 +22,7 @@ memcpy_async_attr = Primitive('memcpy_async') memcpy_async_attr.add_prim_attr("label_for_insert_stream_active", True) memcpy_async = Primitive('memcpy_async') cast = P.Cast() -add = P.TensorAdd() +add = P.Add() class FnDict: diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py index 
9d682da495..0bea8450f7 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py @@ -18,7 +18,7 @@ from mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') depend = P.Depend() addn = P.AddN() -add = P.TensorAdd() +add = P.Add() sub = P.Sub() mul = P.Mul() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py index 1029e4b520..b48b99aa1c 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py @@ -16,7 +16,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') -add = P.TensorAdd() +add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') transdata = Primitive("TransData") diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py index 195402c92b..431a5a3226 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/ir_fusion_test.py @@ -19,7 +19,7 @@ from mindspore.ops.operations import _grad_ops as G # pylint: disable=unused-variable tuple_getitem = Primitive('tuple_getitem') -add = P.TensorAdd() +add = P.Add() allreduce = P.AllReduce() allreduce.add_prim_attr('fusion', 1) make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py index 5660771723..4628663cc4 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_rule_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -Add = P.TensorAdd() +Add = P.Add() Mul = P.Mul() RealDiv = P.RealDiv() Rsqrt = P.Rsqrt() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py index 87dba9b3f7..4de8af2124 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_rule_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -Add = P.TensorAdd() +Add = P.Add() Mul = P.Mul() RealDiv = P.RealDiv() Rsqrt = P.Rsqrt() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py index 859905beb0..eeae90cb32 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_mv_with_decay_v1_rule.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -add = P.TensorAdd() +add = P.Add() mul = P.Mul() real_div = P.RealDiv() rsqrt = P.Rsqrt() diff --git 
a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py index ab6369bb63..2a159c40b7 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/lamb_next_right_rule_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -Add = P.TensorAdd() +Add = P.Add() Mul = P.Mul() Sqrt = P.Sqrt() Square = P.Square() diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py index e4d2d92245..db6396d9ee 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/layer_norm_beta_gamma_backprop_fusion_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -Add = P.TensorAdd() +Add = P.Add() Cast = P.Cast() LayerNormBetaGammaBackprop = Primitive('LayerNormBetaGammaBackprop') tuple_getitem = Primitive('tuple_getitem') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py index 7ca105d165..164e9a49b7 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py @@ -18,7 +18,7 @@ from mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') depend = P.Depend() addn = P.AddN() -add = P.TensorAdd() +add = P.Add() sub = P.Sub() mul = P.Mul() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py index 6b2c9daf4b..4a0ab550f3 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mul_add_fusion_test.py @@ -15,7 +15,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P -add = P.TensorAdd() +add = P.Add() mul = P.Mul() fused_mul_add = Primitive('FusedMulAdd') make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py index 2d98b50e3f..2acaff2987 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py @@ -18,7 +18,7 @@ from mindspore.ops import operations as P depend = P.Depend() controldepend = Primitive("ControlDepend") TransData = Primitive('TransData') -add = P.TensorAdd() +add = P.Add() make_tuple = Primitive('make_tuple') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py index 02e8555ba1..2b32d3036d 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py @@ -16,7 +16,7 @@ from mindspore.ops import Primitive from 
mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') -add = P.TensorAdd() +add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') trans_data = Primitive("TransData") diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py index a7c45bdf9b..2bf9401a8e 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py @@ -16,7 +16,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') -add = P.TensorAdd() +add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') four2five = Primitive('Four2Five') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py index 3f6b38c318..436cd04374 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py @@ -16,7 +16,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P tuple_getitem = Primitive('tuple_getitem') -add = P.TensorAdd() +add = P.Add() max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2) make_tuple = Primitive('make_tuple') transdata = Primitive("TransData") diff --git a/tests/ut/cpp/python_input/gtest_input/session/session_test.py b/tests/ut/cpp/python_input/gtest_input/session/session_test.py index dbd79e0444..dea463dc1f 100644 --- a/tests/ut/cpp/python_input/gtest_input/session/session_test.py +++ b/tests/ut/cpp/python_input/gtest_input/session/session_test.py @@ -17,7 +17,7 @@ from mindspore.ops import Primitive from mindspore.ops import operations as P addn = P.AddN() -add = P.TensorAdd() +add = P.Add() reshape = P.Reshape() cast = P.Cast() tuple_getitem = Primitive('tuple_getitem') diff --git a/tests/ut/cpp/session/anf_runtime_algorithm_test.cc b/tests/ut/cpp/session/anf_runtime_algorithm_test.cc index 507c3f61b1..e452b1175f 100644 --- a/tests/ut/cpp/session/anf_runtime_algorithm_test.cc +++ b/tests/ut/cpp/session/anf_runtime_algorithm_test.cc @@ -42,7 +42,7 @@ TEST_F(AnfRuntimeAlgorithmTest, VisitKernel) { // test nullptr as input EXPECT_THROW(AnfAlgo::VisitKernel(nullptr, 0), std::runtime_error); // test value node as input - ValueNodePtr value_node = NewValueNode(prim::kPrimTensorAdd); + ValueNodePtr value_node = NewValueNode(prim::kPrimAdd); kernel_with_index = AnfAlgo::VisitKernel(value_node, 0); EXPECT_NE(kernel_with_index.first->cast(), nullptr); EXPECT_EQ((kernel_with_index.first->cast()).get(), value_node.get()); @@ -61,7 +61,7 @@ TEST_F(AnfRuntimeAlgorithmTest, VisitKernel) { EXPECT_EQ((kernel_with_index.first->cast()).get(), add.get()); EXPECT_EQ(kernel_with_index.second, 0); // test maketuple node as input - std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector add_inputs{NewValueNode(prim::kPrimAdd)}; auto add_second = kernel_graph->NewCNode(add_inputs); std::vector make_tuple_inputs{NewValueNode(prim::kPrimMakeTuple), add, add_second}; auto make_tuple = kernel_graph->NewCNode(make_tuple_inputs); @@ -104,7 +104,7 @@ TEST_F(AnfRuntimeAlgorithmTest, VisitKernel) { 
TEST_F(AnfRuntimeAlgorithmTest, GetCNodePrimitive) { auto kernel_graph = std::make_shared(); // test cnode node - PrimitivePtr add_primitive = prim::kPrimTensorAdd; + PrimitivePtr add_primitive = prim::kPrimAdd; std::vector inputs{NewValueNode(add_primitive)}; auto add = kernel_graph->NewCNode(inputs); EXPECT_NE(AnfAlgo::GetCNodePrimitive(add), nullptr); @@ -115,9 +115,9 @@ TEST_F(AnfRuntimeAlgorithmTest, GetCNodePrimitive) { TEST_F(AnfRuntimeAlgorithmTest, GetCNodeName) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(inputs); - EXPECT_EQ(AnfAlgo::GetCNodeName(add), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(add), prim::kPrimAdd->name()); EXPECT_THROW(AnfAlgo::GetCNodeName(nullptr), std::runtime_error); // test parameter auto parameter_node = kernel_graph->add_parameter(); @@ -127,7 +127,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetCNodeName) { TEST_F(AnfRuntimeAlgorithmTest, GetNodeDebugString) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetNodeDebugString(add), add->DebugString()); EXPECT_THROW(AnfAlgo::GetNodeDebugString(nullptr), std::runtime_error); @@ -136,7 +136,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetNodeDebugString) { TEST_F(AnfRuntimeAlgorithmTest, SetNodeAttr) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(inputs); AnfAlgo::SetNodeAttr("test_set_attr", MakeValue("test_value"), add); auto primitive = AnfAlgo::GetCNodePrimitive(add); @@ -150,7 +150,7 @@ TEST_F(AnfRuntimeAlgorithmTest, SetNodeAttr) { TEST_F(AnfRuntimeAlgorithmTest, CopyNodeAttr) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector add_inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(add_inputs); AnfAlgo::SetNodeAttr("test_set_attr", MakeValue("test_value"), add); @@ -174,7 +174,7 @@ TEST_F(AnfRuntimeAlgorithmTest, CopyNodeAttr) { TEST_F(AnfRuntimeAlgorithmTest, CopyNodeAttrs) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector add_inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(add_inputs); AnfAlgo::SetNodeAttr("test_set_attr", MakeValue("test_value"), add); @@ -198,7 +198,7 @@ TEST_F(AnfRuntimeAlgorithmTest, CopyNodeAttrs) { TEST_F(AnfRuntimeAlgorithmTest, EraseNodeAttr) { auto kernel_graph = std::make_shared(); // test cnode node - std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd)}; + std::vector add_inputs{NewValueNode(prim::kPrimAdd)}; auto add = kernel_graph->NewCNode(add_inputs); AnfAlgo::SetNodeAttr("test_set_attr", MakeValue("test_value"), add); AnfAlgo::SetNodeAttr("test_set_attr_v2", MakeValue("test_value_v2"), add); @@ -215,7 +215,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetInputTensorNum) { // test cnode node auto parameter_one = kernel_graph->NewParameter(); auto parameter_two = kernel_graph->NewParameter(); - std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd), parameter_one, parameter_two}; + std::vector add_inputs{NewValueNode(prim::kPrimAdd), parameter_one, 
parameter_two}; auto add = kernel_graph->NewCNode(add_inputs); EXPECT_EQ(AnfAlgo::GetInputTensorNum(add), 2); EXPECT_THROW(AnfAlgo::GetInputTensorNum(nullptr), std::runtime_error); @@ -238,7 +238,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputTensorNum) { EXPECT_THROW(AnfAlgo::GetOutputTensorNum(nullptr), std::runtime_error); // test add as input inputs.clear(); - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_abstract(std::make_shared()); @@ -249,7 +249,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputTensorNum) { TEST_F(AnfRuntimeAlgorithmTest, GetOutputFormat) { auto kernel_graph = std::make_shared(); - std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + std::vector inputs = {NewValueNode(prim::kPrimAdd), kernel_graph->NewParameter(), kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); std::vector shape = {1, 2, 3, 4}; @@ -270,7 +270,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputFormat) { TEST_F(AnfRuntimeAlgorithmTest, GetInputFormat) { auto kernel_graph = std::make_shared(); - std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + std::vector inputs = {NewValueNode(prim::kPrimAdd), kernel_graph->NewParameter(), kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); @@ -290,7 +290,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetInputFormat) { TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputFormat) { auto kernel_graph = std::make_shared(); std::vector pre_node_inputs; - pre_node_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + pre_node_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto pre_add = kernel_graph->NewCNode(pre_node_inputs); MS_EXCEPTION_IF_NULL(pre_add); pre_add->set_kernel_info(std::make_shared()); @@ -300,7 +300,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputFormat) { builder.SetOutputsDeviceType({kFloat32->type_id()}); builder.SetOutputsFormat({kOpFormat_NCHW}); d_kernel_info->set_select_kernel_build_info(builder.Build()); - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), pre_add}; + std::vector inputs{NewValueNode(prim::kPrimAdd), pre_add}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetPrevNodeOutputFormat(add, 0), kOpFormat_NCHW); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputFormat(nullptr, 0), std::runtime_error); @@ -317,7 +317,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputInferShape) { AbstractBasePtrList args_spec_list{x_abstract, none_abstract, x_abstract}; auto tuple_abstract = std::make_shared(args_spec_list); // test value node as input - auto value_node = NewValueNode(prim::kPrimTensorAdd); + auto value_node = NewValueNode(prim::kPrimAdd); MS_EXCEPTION_IF_NULL(value_node); value_node->set_abstract(x_abstract); EXPECT_EQ(AnfAlgo::GetOutputInferShape(value_node, 0)[1], 32); @@ -329,7 +329,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputInferShape) { EXPECT_EQ(AnfAlgo::GetOutputInferShape(parameter_node, 0)[2], 224); // test cnode as input std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_abstract(std::make_shared()); @@ -354,7 +354,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputInferShape) { parameter_node->set_abstract(x_abstract); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputInferShape(parameter_node, 0), 
std::runtime_error); // test cnode as input - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), parameter_node}; + std::vector inputs{NewValueNode(prim::kPrimAdd), parameter_node}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetPrevNodeOutputInferShape(add, 0)[1], 32); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputInferShape(add, 1), std::runtime_error); @@ -369,7 +369,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputDeviceShape) { auto tuple_abstract = std::make_shared(args_spec_list); // test cnode as input std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_abstract(tuple_abstract); @@ -401,7 +401,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetInputDeviceShape) { MS_EXCEPTION_IF_NULL(parameter_third); parameter_third->set_abstract(x_abstract); // test cnode as input - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), parameter_one, parameter_two, parameter_third}; + std::vector inputs{NewValueNode(prim::kPrimAdd), parameter_one, parameter_two, parameter_third}; auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -435,13 +435,13 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputInferDataTypeTest) { TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputInferDataType) { auto kernel_graph = std::make_shared(); std::vector pre_node_inputs; - pre_node_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + pre_node_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto pre_add = kernel_graph->NewCNode(pre_node_inputs); MS_EXCEPTION_IF_NULL(pre_add); std::vector shp{2, 32, 224, 224}; auto x_abstract = std::make_shared(kFloat32, shp); pre_add->set_abstract(x_abstract); - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), pre_add}; + std::vector inputs{NewValueNode(prim::kPrimAdd), pre_add}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetPrevNodeOutputInferDataType(add, 0), kFloat32->type_id()); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputInferDataType(add, 1), std::runtime_error); @@ -454,7 +454,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputInferDataType) { TEST_F(AnfRuntimeAlgorithmTest, GetOutputDeviceDataTypeTest) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -470,7 +470,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputDeviceDataTypeTest) { TEST_F(AnfRuntimeAlgorithmTest, GetInputDeviceDataTypeTest) { auto kernel_graph = std::make_shared(); - std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + std::vector inputs = {NewValueNode(prim::kPrimAdd), kernel_graph->NewParameter(), kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); @@ -489,7 +489,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetInputDeviceDataTypeTest) { TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputDeviceDataType) { auto kernel_graph = std::make_shared(); std::vector pre_add_inputs; - pre_add_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + pre_add_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto pre_add = kernel_graph->NewCNode(pre_add_inputs); MS_EXCEPTION_IF_NULL(pre_add); pre_add->set_kernel_info(std::make_shared()); @@ -498,7 +498,7 @@ TEST_F(AnfRuntimeAlgorithmTest, 
GetPrevNodeOutputDeviceDataType) { KernelBuildInfoBuilder builder; builder.SetOutputsDeviceType({kFloat32->type_id()}); d_kernel_info->set_select_kernel_build_info(builder.Build()); - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), pre_add}; + std::vector inputs{NewValueNode(prim::kPrimAdd), pre_add}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetPrevNodeOutputDeviceDataType(add, 0), kFloat32->type_id()); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputDeviceDataType(add, 1), std::runtime_error); @@ -510,7 +510,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputDeviceDataType) { TEST_F(AnfRuntimeAlgorithmTest, GetOutputAddr) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -525,7 +525,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputAddr) { TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputAddr) { auto kernel_graph = std::make_shared(); std::vector pre_add_inputs; - pre_add_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + pre_add_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto pre_add = kernel_graph->NewCNode(pre_add_inputs); MS_EXCEPTION_IF_NULL(pre_add); pre_add->set_kernel_info(std::make_shared()); @@ -534,7 +534,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputAddr) { int *addr = nullptr; auto device_address = std::make_shared(addr, 1); d_kernel_info->SetOutputAddr(device_address, 0); - std::vector inputs{NewValueNode(prim::kPrimTensorAdd), pre_add}; + std::vector inputs{NewValueNode(prim::kPrimAdd), pre_add}; auto add = kernel_graph->NewCNode(inputs); EXPECT_EQ(AnfAlgo::GetPrevNodeOutputAddr(add, 0), device_address.get()); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputAddr(add, 1), std::runtime_error); @@ -546,7 +546,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputAddr) { TEST_F(AnfRuntimeAlgorithmTest, SetOutputAddr) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); int *addr = nullptr; auto device_address = std::make_shared(addr, 1); @@ -558,7 +558,7 @@ TEST_F(AnfRuntimeAlgorithmTest, SetOutputAddr) { TEST_F(AnfRuntimeAlgorithmTest, GetWorkspaceAddr) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -573,7 +573,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetWorkspaceAddr) { TEST_F(AnfRuntimeAlgorithmTest, SetWorkspaceAddr) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); int *addr = nullptr; auto device_address = std::make_shared(addr, 1); @@ -585,7 +585,7 @@ TEST_F(AnfRuntimeAlgorithmTest, SetWorkspaceAddr) { TEST_F(AnfRuntimeAlgorithmTest, SetOutputInferTypeAndShape) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); // set none abstract std::vector none_types = {}; @@ -615,7 +615,7 @@ TEST_F(AnfRuntimeAlgorithmTest, 
SetOutputInferTypeAndShape) { TEST_F(AnfRuntimeAlgorithmTest, CopyAbstract) { auto kernel_graph = std::make_shared(); std::vector first_inputs; - first_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + first_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto first_add = kernel_graph->NewCNode(first_inputs); // set single input std::vector single_types = {kFloat32->type_id()}; @@ -623,7 +623,7 @@ TEST_F(AnfRuntimeAlgorithmTest, CopyAbstract) { AnfAlgo::SetOutputInferTypeAndShape(single_types, single_shapes, first_add.get()); // set mutiple input std::vector second_inputs; - second_inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + second_inputs.push_back(NewValueNode(prim::kPrimAdd)); auto second_add = kernel_graph->NewCNode(second_inputs); std::vector mutiple_types = {kFloat16->type_id(), kFloat32->type_id(), kFloat64->type_id()}; std::vector> mutiple_shapes = {{2, 32, 224, 224}, {2, 32, 224, 224}, {2, 32, 224, 224}}; @@ -640,7 +640,7 @@ TEST_F(AnfRuntimeAlgorithmTest, CopyAbstract) { TEST_F(AnfRuntimeAlgorithmTest, GetKernelType) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -656,7 +656,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetKernelType) { TEST_F(AnfRuntimeAlgorithmTest, GetProcessor) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -672,7 +672,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetProcessor) { TEST_F(AnfRuntimeAlgorithmTest, GetFusionType) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -688,7 +688,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetFusionType) { TEST_F(AnfRuntimeAlgorithmTest, SetSelectKernelBuildInfo) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); std::shared_ptr builder = std::make_shared(); builder->SetFusionType(kernel::CONVLUTION); @@ -700,7 +700,7 @@ TEST_F(AnfRuntimeAlgorithmTest, SetSelectKernelBuildInfo) { TEST_F(AnfRuntimeAlgorithmTest, GetKernelMod) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -714,7 +714,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetKernelMod) { TEST_F(AnfRuntimeAlgorithmTest, SetKernelMod) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); AnfAlgo::SetKernelMod(nullptr, add.get()); EXPECT_THROW(AnfAlgo::SetKernelMod(nullptr, nullptr), std::runtime_error); @@ -724,7 +724,7 @@ TEST_F(AnfRuntimeAlgorithmTest, SetKernelMod) { TEST_F(AnfRuntimeAlgorithmTest, IsRealKernel) { auto kernel_graph = 
std::make_shared(); // test value node as input - auto value_node = NewValueNode(prim::kPrimTensorAdd); + auto value_node = NewValueNode(prim::kPrimAdd); EXPECT_TRUE(AnfAlgo::IsRealKernel(value_node)); EXPECT_THROW(AnfAlgo::IsRealKernel(nullptr), std::runtime_error); // test parameter as input @@ -732,7 +732,7 @@ TEST_F(AnfRuntimeAlgorithmTest, IsRealKernel) { EXPECT_TRUE(AnfAlgo::IsRealKernel(parameter_node)); // test add as input std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); EXPECT_TRUE(AnfAlgo::IsRealKernel(add)); // test Depend as input @@ -745,7 +745,7 @@ TEST_F(AnfRuntimeAlgorithmTest, IsRealKernel) { TEST_F(AnfRuntimeAlgorithmTest, IsRealCNodeKernel) { auto kernel_graph = std::make_shared(); // test value node as input - auto value_node = NewValueNode(prim::kPrimTensorAdd); + auto value_node = NewValueNode(prim::kPrimAdd); EXPECT_FALSE(AnfAlgo::IsRealCNodeKernel(value_node)); EXPECT_THROW(AnfAlgo::IsRealCNodeKernel(nullptr), std::runtime_error); // test parameter as input @@ -753,7 +753,7 @@ TEST_F(AnfRuntimeAlgorithmTest, IsRealCNodeKernel) { EXPECT_FALSE(AnfAlgo::IsRealCNodeKernel(parameter_node)); // test add as input std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); EXPECT_TRUE(AnfAlgo::IsRealCNodeKernel(add)); // test ImageSummary as input @@ -776,7 +776,7 @@ TEST_F(AnfRuntimeAlgorithmTest, IsParameterWeight) { TEST_F(AnfRuntimeAlgorithmTest, GetStreamId) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -790,7 +790,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetStreamId) { TEST_F(AnfRuntimeAlgorithmTest, SetStreamId) { auto kernel_graph = std::make_shared(); std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + inputs.push_back(NewValueNode(prim::kPrimAdd)); auto add = kernel_graph->NewCNode(inputs); AnfAlgo::SetStreamId(0, add.get()); EXPECT_THROW(AnfAlgo::SetStreamId(0, nullptr), std::runtime_error); diff --git a/tests/ut/cpp/session/kernel_graph_test.cc b/tests/ut/cpp/session/kernel_graph_test.cc index 5157c9587f..684ece7a63 100644 --- a/tests/ut/cpp/session/kernel_graph_test.cc +++ b/tests/ut/cpp/session/kernel_graph_test.cc @@ -96,11 +96,11 @@ TEST_F(KernelGraphTest, NewParameter) { TEST_F(KernelGraphTest, NewCNode) { auto kernel_graph = std::make_shared(); - auto add_value = NewValueNode(prim::kPrimTensorAdd); + auto add_value = NewValueNode(prim::kPrimAdd); std::vector inputs = {add_value}; auto new_cnode = kernel_graph->NewCNode(inputs); EXPECT_NE(new_cnode, nullptr); - EXPECT_EQ(AnfAlgo::GetCNodeName(new_cnode), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(new_cnode), prim::kPrimAdd->name()); EXPECT_TRUE(AnfAlgo::GetOutputInferShape(new_cnode, 0).empty()); EXPECT_EQ(AnfAlgo::GetOutputInferDataType(new_cnode, 0), kMetaTypeNone); } @@ -149,7 +149,7 @@ TEST_F(KernelGraphTest, SetExecOrderByDefault) { MS_EXCEPTION_IF_NULL(y_parameter); y_parameter->set_name("y_parameter"); y_parameter->set_abstract(abstract); - std::vector add_inputs = {NewValueNode(prim::kPrimTensorAdd), x_parameter, y_parameter}; + std::vector add_inputs = {NewValueNode(prim::kPrimAdd), x_parameter, 
y_parameter}; auto add = kernel_graph->NewCNode(add_inputs); MS_EXCEPTION_IF_NULL(add); add->set_abstract(abstract); @@ -174,13 +174,13 @@ TEST_F(KernelGraphTest, SetExecOrderByDefault) { kernel_graph->SetExecOrderByDefault(); auto execution_order = kernel_graph->execution_order(); EXPECT_EQ(execution_order.size(), 2); - EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimAdd->name()); EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[1]), prim::kPrimMul->name()); // test set_execution_order() function kernel_graph->set_execution_order({add}); execution_order = kernel_graph->execution_order(); EXPECT_EQ(execution_order.size(), 1); - EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimAdd->name()); } TEST_F(KernelGraphTest, SetGraphId) { diff --git a/tests/ut/cpp/session/session_basic_test.cc b/tests/ut/cpp/session/session_basic_test.cc index 92bec6f227..ac5dc5cbb9 100644 --- a/tests/ut/cpp/session/session_basic_test.cc +++ b/tests/ut/cpp/session/session_basic_test.cc @@ -52,7 +52,7 @@ TEST_F(SessionBasicTest, ConstructKernelGraph) { EXPECT_NE(original_y_parameter, nullptr); original_y_parameter->set_name("original_y_parameter"); original_y_parameter->set_abstract(abstract); - std::vector add_inputs = {NewValueNode(prim::kPrimTensorAdd), original_x_parameter, original_y_parameter}; + std::vector add_inputs = {NewValueNode(prim::kPrimAdd), original_x_parameter, original_y_parameter}; auto original_add = anf_graph->NewCNode(add_inputs); EXPECT_NE(original_add, nullptr); original_add->set_abstract(abstract); @@ -87,7 +87,7 @@ TEST_F(SessionBasicTest, ConstructKernelGraph) { kernel_graph->SetExecOrderByDefault(); auto execution_order = kernel_graph->execution_order(); EXPECT_EQ(execution_order.size(), 2); - EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimTensorAdd->name()); + EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[0]), prim::kPrimAdd->name()); EXPECT_EQ(AnfAlgo::GetCNodeName(execution_order[1]), prim::kPrimMul->name()); auto new_outputs = kernel_graph->outputs(); EXPECT_EQ(new_outputs.size(), 1); diff --git a/tests/ut/cpp/transform/convert_test.cc b/tests/ut/cpp/transform/convert_test.cc index c532bf21cd..fde3666017 100644 --- a/tests/ut/cpp/transform/convert_test.cc +++ b/tests/ut/cpp/transform/convert_test.cc @@ -678,7 +678,7 @@ TEST_F(TestConvert, TestNPUClearFloatStatusOps) { #endif TEST_F(TestConvert, TestAddOps) { - auto prim = std::make_shared("TensorAdd"); + auto prim = std::make_shared("Add"); auto func_graph = MakeFuncGraph(prim, 2); ASSERT_TRUE(nullptr != func_graph); diff --git a/tests/ut/python/communication/test_data_parallel_resnet.py b/tests/ut/python/communication/test_data_parallel_resnet.py index ad60c3c957..cbeb807af5 100644 --- a/tests/ut/python/communication/test_data_parallel_resnet.py +++ b/tests/ut/python/communication/test_data_parallel_resnet.py @@ -22,7 +22,7 @@ import mindspore.nn as nn from mindspore import Tensor, Model from mindspore.context import ParallelMode from mindspore.nn.optim import Momentum -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from ....dataset_mock import MindData @@ -67,7 +67,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = nn.BatchNorm2d(out_channels) - self.add = TensorAdd() + self.add = 
Add() def construct(self, x): """ diff --git a/tests/ut/python/exec/resnet_example.py b/tests/ut/python/exec/resnet_example.py index 816849f4e6..85ea8e34f7 100644 --- a/tests/ut/python/exec/resnet_example.py +++ b/tests/ut/python/exec/resnet_example.py @@ -79,7 +79,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): """ diff --git a/tests/ut/python/exec/test_tensor_add.py b/tests/ut/python/exec/test_tensor_add.py index 85ea68f6e7..e9d62e3ea1 100644 --- a/tests/ut/python/exec/test_tensor_add.py +++ b/tests/ut/python/exec/test_tensor_add.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -""" test TensorAdd """ +""" test Add """ import numpy as np import mindspore.nn as nn @@ -23,7 +23,7 @@ from mindspore.ops import operations as P class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, input1, input2): return self.add(input1, input2) @@ -31,7 +31,7 @@ class Net(nn.Cell): def test_tensor_add(): """test_tensor_add""" - add = P.TensorAdd() + add = P.Add() input1 = Tensor(np.random.rand(1, 3, 4, 4).astype(np.float32)) input2 = Tensor(np.random.rand(1, 3, 4, 4).astype(np.float32)) output = add(input1, input2) diff --git a/tests/ut/python/ir/test_row_tensor.py b/tests/ut/python/ir/test_row_tensor.py index b83f985ea8..8d21d8550f 100644 --- a/tests/ut/python/ir/test_row_tensor.py +++ b/tests/ut/python/ir/test_row_tensor.py @@ -347,7 +347,7 @@ def test_row_tensor_model_train(): def __init__(self, in_features, out_features): super(Net, self).__init__() self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() self.flag = True diff --git a/tests/ut/python/keep_order/test_keep_order.py b/tests/ut/python/keep_order/test_keep_order.py index 0113a36278..b2a6d4d144 100644 --- a/tests/ut/python/keep_order/test_keep_order.py +++ b/tests/ut/python/keep_order/test_keep_order.py @@ -23,9 +23,9 @@ from mindspore.ops import composite as C from mindspore.ops import operations as P context.set_context(mode=context.GRAPH_MODE) -add1 = P.TensorAdd() +add1 = P.Add() mul1 = P.MatMul() -add2 = P.TensorAdd() +add2 = P.Add() def add(x, y): diff --git a/tests/ut/python/model/res18_example.py b/tests/ut/python/model/res18_example.py index ac7d77fc48..20924baadf 100644 --- a/tests/ut/python/model/res18_example.py +++ b/tests/ut/python/model/res18_example.py @@ -20,7 +20,7 @@ import numpy as np import mindspore.nn as nn # pylint: disable=C0414 from mindspore import Tensor from mindspore.common.api import _executor -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from ...train_step_wrap import train_step_with_loss_warp @@ -65,7 +65,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = nn.BatchNorm2d(out_channels) - self.add = TensorAdd() + self.add = Add() def construct(self, x): """ diff --git a/tests/ut/python/model/resnet.py b/tests/ut/python/model/resnet.py index 001e1db0cf..163502a7e5 100644 --- a/tests/ut/python/model/resnet.py +++ 
b/tests/ut/python/model/resnet.py @@ -105,7 +105,7 @@ class ResidualBlock(nn.Cell): if self.down_sample: self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)]) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py index 722ce1c39c..94ca0e8a97 100644 --- a/tests/ut/python/model/test_mix_precision.py +++ b/tests/ut/python/model/test_mix_precision.py @@ -205,7 +205,7 @@ def test_dict_cast(): class SecondNet(nn.Cell): def __init__(self): super(SecondNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, tensor_c, **kwargs): d = F.mixed_precision_cast(mstype.float16, tensor_c) @@ -225,7 +225,7 @@ def test_kwarg_cast(): def __init__(self): super(FirstNet, self).__init__() self.net = SecondNet().add_flags_recursive(fp16=True) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, tensor_a, tensor_b): tensor_c = self.add(tensor_a, tensor_b) @@ -236,7 +236,7 @@ def test_kwarg_cast(): class SecondNet(nn.Cell): def __init__(self): super(SecondNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, key1=1, key2=2): tensor_d = self.add(key1, key2["key"]) diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py index 3992508265..8a1ac7dfa8 100644 --- a/tests/ut/python/ops/test_array_ops.py +++ b/tests/ut/python/ops/test_array_ops.py @@ -208,7 +208,7 @@ class CustNet3(Cell): class MathBinaryNet1(Cell): def __init__(self): super(MathBinaryNet1, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() self.max = P.Maximum() self.number = 3 diff --git a/tests/ut/python/ops/test_bprop_disorder.py b/tests/ut/python/ops/test_bprop_disorder.py index 7f1829d5e7..b0392e5cd4 100644 --- a/tests/ut/python/ops/test_bprop_disorder.py +++ b/tests/ut/python/ops/test_bprop_disorder.py @@ -42,7 +42,7 @@ class DisOrderTest1(nn.Cell): self.s3 = Parameter(weight, name="s3") self.s4 = Parameter(weight, name="s4") self.mul = P.Mul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): return x * (self.s1 * self.s2 + self.s2 * self.s3 + self.s3 * self.s4 + self.s4 * self.s1) @@ -59,7 +59,7 @@ class DisOrderTest2(nn.Cell): self.s3 = Parameter(weight, name="s3") self.s4 = Parameter(weight, name="s4") self.mul = P.Mul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): return self.mul(x, (self.add(self.add(self.add(self.mul(self.s1, self.s2), self.mul(self.s2, self.s3)), diff --git a/tests/ut/python/ops/test_control_ops.py b/tests/ut/python/ops/test_control_ops.py index e446644bdd..96e31459ab 100644 --- a/tests/ut/python/ops/test_control_ops.py +++ b/tests/ut/python/ops/test_control_ops.py @@ -40,7 +40,7 @@ def cond_data_test(x_init, y_init): """""" super(Net, self).__init__() self.square = P.Square() - self.add = P.TensorAdd() + self.add = P.Add() self.value = Tensor(3, dtype=ms.float32) self.switch = P.GeSwitch() self.merge = P.Merge() @@ -79,7 +79,7 @@ def if_compile_test(x_init, y_init): """""" super(Net, self).__init__() self.square = P.Square() - self.add = P.TensorAdd() + self.add = P.Add() self.value = Tensor(3, dtype=ms.float32) self.switch = P.GeSwitch() self.merge = P.Merge() @@ -465,7 +465,7 @@ def test_parser_switch_layer_switch_in_bprop(): class Add(nn.Cell): def __init__(self): super().__init__() - self.op = P.TensorAdd() + self.op = P.Add() def 
construct(self, x, y): return self.op(x, y) @@ -503,7 +503,7 @@ def test_parser_switch_layer_inputs_tuple(): class Add(nn.Cell): def __init__(self): super().__init__() - self.op = P.TensorAdd() + self.op = P.Add() def construct(self, x): y = self.op(x[0], x[1]) @@ -842,7 +842,7 @@ def test_while_add(): self.start = Tensor(0, dtype=mstype.int32) self.end = Tensor(2, dtype=mstype.int32) self.out = Tensor(np.zeros([2, 3], dtype=np.float32)) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, inputs): idx = self.start diff --git a/tests/ut/python/ops/test_dynamic_shape.py b/tests/ut/python/ops/test_dynamic_shape.py index 8cdc4c0a9b..23c5f004af 100755 --- a/tests/ut/python/ops/test_dynamic_shape.py +++ b/tests/ut/python/ops/test_dynamic_shape.py @@ -41,7 +41,7 @@ def test_sparse_apply_proximal_ada_grad(): def __init__(self): super(NetWrapper, self).__init__() self.unq = P.Unique() - self.add = P.TensorAdd() + self.add = P.Add() self.expand_dims = P.ExpandDims() self.cast = P.Cast() self.net = Net() @@ -75,7 +75,7 @@ def test_sparse_apply_ftrl(): def __init__(self): super(NetWrapper, self).__init__() self.unq = P.Unique() - self.add = P.TensorAdd() + self.add = P.Add() self.expand_dims = P.ExpandDims() self.cast = P.Cast() self.net = SparseApplyFtrlNet() diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py index 9772de82e4..a69f1e38e7 100755 --- a/tests/ut/python/ops/test_math_ops_check.py +++ b/tests/ut/python/ops/test_math_ops_check.py @@ -81,7 +81,7 @@ class CumSumNet(nn.Cell): raise_set = [ # input two tensors, their shapes do not match ('TensorAdd2', { - 'block': (P.TensorAdd(), {'exception': ValueError, 'error_keywords': ['TensorAdd']}), + 'block': (P.Add(), {'exception': ValueError, 'error_keywords': ['Add']}), 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), @@ -573,7 +573,7 @@ raise_set = [ test_case_math_ops = [ # input two tensors, but element types are not same ('TensorAdd1', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input two tensors, but element types are not same diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index 4806b8963e..64f09b19e5 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -78,7 +78,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = nn.BatchNorm2d(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): """ diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 686d6a6cbb..e847cadd20 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -198,7 +198,7 @@ class SummaryNet(nn.Cell): def __init__(self): super(SummaryNet, self).__init__() self.s = P.ScalarSummary() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): self.s("x1", x) @@ -209,7 +209,7 @@ class HistogramSummaryNet(nn.Cell): def __init__(self): super(HistogramSummaryNet, self).__init__() self.summary = P.HistogramSummary() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): out = self.add(x, y) @@ -1000,8 +1000,8 @@ test_case_math_ops = [ 'block': P.Sub(), 'desc_inputs': [[3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), - 
('TensorAdd', { - 'block': P.TensorAdd(), + ('Add', { + 'block': P.Add(), 'desc_inputs': [[3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), ('Mul0', { @@ -1028,26 +1028,26 @@ test_case_math_ops = [ 'desc_bprop': [[2, 3, 3, 5]], 'skip': ['backward']}), ('Add0', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), ('Add1', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [[3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]], 'skip': ['backward']}), ('Add2', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [[2, 3, 3, 5], [3, 5]], 'desc_bprop': [[2, 3, 3, 5]], 'skip': ['backward']}), ('Add3', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [[2, 3, 1, 1], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]], 'skip': ['backward']}), ('Add4', { - 'block': P.TensorAdd(), + 'block': P.Add(), 'desc_inputs': [[2, 3, 3, 5], [2, 3, 1, 1]], 'desc_bprop': [[2, 3, 3, 5]], 'skip': ['backward']}), diff --git a/tests/ut/python/optimizer/test_debug_location.py b/tests/ut/python/optimizer/test_debug_location.py index e4ab48e78a..589a3414b5 100644 --- a/tests/ut/python/optimizer/test_debug_location.py +++ b/tests/ut/python/optimizer/test_debug_location.py @@ -77,7 +77,7 @@ class Net(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, input_): output = self.add(self.matmul(input_, self.weight), self.bias) @@ -90,7 +90,7 @@ class NetFP16(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() def construct(self, input_): diff --git a/tests/ut/python/optimizer/test_optimizer_with_loss_scale.py b/tests/ut/python/optimizer/test_optimizer_with_loss_scale.py index 84feccb672..08fafffc9f 100644 --- a/tests/ut/python/optimizer/test_optimizer_with_loss_scale.py +++ b/tests/ut/python/optimizer/test_optimizer_with_loss_scale.py @@ -54,7 +54,7 @@ class Net(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, input_): output = self.add(self.matmul(input_, self.weight), self.bias) @@ -67,7 +67,7 @@ class NetFP16(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() def construct(self, input_): diff --git a/tests/ut/python/parallel/test_add_relu_redistribution.py b/tests/ut/python/parallel/test_add_relu_redistribution.py index 1efb3acde8..b7e09621b7 100644 --- a/tests/ut/python/parallel/test_add_relu_redistribution.py +++ b/tests/ut/python/parallel/test_add_relu_redistribution.py @@ -29,7 +29,7 @@ grad_all = C.GradOperation(get_all=True) class AddRelu(nn.Cell): def __init__(self, strategy0=None, strategy1=None): super(AddRelu, self).__init__() - self.add = 
P.TensorAdd().shard(strategy=strategy0) + self.add = P.Add().shard(strategy=strategy0) self.relu = P.ReLU().shard(strategy=strategy1) def construct(self, x, z): diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py index 2d475945ba..85d73ea8bb 100644 --- a/tests/ut/python/parallel/test_arithmetic.py +++ b/tests/ut/python/parallel/test_arithmetic.py @@ -81,7 +81,7 @@ def test_matmul_add(): def __init__(self, strategy1, strategy2): super().__init__() self.matmul = P.MatMul().shard(strategy1) - self.add = P.TensorAdd().shard(strategy2) + self.add = P.Add().shard(strategy2) def construct(self, x, y, b): out = self.matmul(x, y) @@ -305,7 +305,7 @@ def test_matmul_add_broadcast(): def __init__(self, strategy1, strategy2): super().__init__() self.matmul = P.MatMul().shard(strategy1) - self.add = P.TensorAdd().shard(strategy2) + self.add = P.Add().shard(strategy2) def construct(self, x, y, b): out = self.matmul(x, y) @@ -329,7 +329,7 @@ def test_matmul_add_broadcast2(): def __init__(self, strategy1, strategy2): super().__init__() self.matmul = P.MatMul().shard(strategy1) - self.add = P.TensorAdd().shard(strategy2) + self.add = P.Add().shard(strategy2) def construct(self, x, y, b): out = self.matmul(x, y) diff --git a/tests/ut/python/parallel/test_auto_parallel_flag.py b/tests/ut/python/parallel/test_auto_parallel_flag.py index 4b26d3321b..17927ad3e8 100644 --- a/tests/ut/python/parallel/test_auto_parallel_flag.py +++ b/tests/ut/python/parallel/test_auto_parallel_flag.py @@ -51,7 +51,7 @@ class Net(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, input_): output = self.add(self.matmul(input_, self.weight), self.bias) @@ -64,7 +64,7 @@ class NetFP16(nn.Cell): self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight") self.bias = Parameter(Tensor(np.ones([out_features]).astype(np.float32)), name="bias") self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() self.cast = P.Cast() def construct(self, input_): diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop.py b/tests/ut/python/parallel/test_auto_parallel_for_loop.py index 90404aed13..d4392b3335 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop.py @@ -54,7 +54,7 @@ class LayerNorm(nn.Cell): self.mean = P.ReduceMean(keep_dims=True) self.eps = eps self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() self.div = P.RealDiv() diff --git a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py index 20b5655c20..bc358e4871 100644 --- a/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py +++ b/tests/ut/python/parallel/test_auto_parallel_for_loop_reshape.py @@ -54,7 +54,7 @@ class LayerNorm(nn.Cell): self.mean = P.ReduceMean(keep_dims=True) self.eps = eps self.sub = P.Sub() - self.add = P.TensorAdd() + self.add = P.Add() self.mul = P.Mul() self.div = P.RealDiv() diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py index d12a028167..fce38a3255 100644 --- a/tests/ut/python/parallel/test_auto_parallel_resnet.py +++ 
b/tests/ut/python/parallel/test_auto_parallel_resnet.py @@ -97,7 +97,7 @@ class ResidualBlock(nn.Cell): elif self.stride != 1: self.maxpool_down = nn.MaxPool2d(kernel_size=1, stride=2, pad_mode='same') - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/ut/python/parallel/test_auto_parallel_rhombus.py b/tests/ut/python/parallel/test_auto_parallel_rhombus.py index 2bfc0ee4f9..97e821c0e3 100644 --- a/tests/ut/python/parallel/test_auto_parallel_rhombus.py +++ b/tests/ut/python/parallel/test_auto_parallel_rhombus.py @@ -58,8 +58,8 @@ def test_rhombus1(): def __init__(self): super().__init__() self.matmul = P.MatMul() - self.tadd1 = P.TensorAdd() - self.tadd2 = P.TensorAdd() + self.tadd1 = P.Add() + self.tadd2 = P.Add() self.weight = Parameter(Tensor(np.ones([128, 128]).astype(np.float32) * 0.01), "w", requires_grad=True) def construct(self, x, y, z): @@ -85,9 +85,9 @@ def test_rhombus2(): super().__init__() self.matmul1 = P.MatMul() self.matmul2 = P.MatMul() - self.tadd1 = P.TensorAdd() - self.tadd2 = P.TensorAdd() - self.tadd3 = P.TensorAdd() + self.tadd1 = P.Add() + self.tadd2 = P.Add() + self.tadd3 = P.Add() self.weight1 = Parameter(Tensor(np.ones([128, 128]).astype(np.float32) * 0.01), "w", requires_grad=True) self.weight2 = Parameter(Tensor(np.ones([128, 128]).astype(np.float32) * 0.01), "w", requires_grad=True) @@ -115,10 +115,10 @@ def test_rhombus3(): def __init__(self): super().__init__() self.matmul1 = P.MatMul() - self.tadd1 = P.TensorAdd() - self.tadd2 = P.TensorAdd() - self.tadd3 = P.TensorAdd() - self.tadd4 = P.TensorAdd() + self.tadd1 = P.Add() + self.tadd2 = P.Add() + self.tadd3 = P.Add() + self.tadd4 = P.Add() self.weight1 = Parameter(Tensor(np.ones([128, 128]).astype(np.float32) * 0.01), "w", requires_grad=True) self.t = Tensor(np.ones([128, 128]).astype(np.float32) * 0.01) diff --git a/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py b/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py index 28ec839831..9fe72f7abf 100644 --- a/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py +++ b/tests/ut/python/parallel/test_auto_parallel_star_partial_strategy.py @@ -54,7 +54,7 @@ class Net(nn.Cell): self.relu1 = P.ReLU() self.relu2 = P.ReLU() self.ba1 = P.BiasAdd() - self.add = P.TensorAdd() + self.add = P.Add() self.weight = Parameter(Tensor(np.ones([128, 1000]), dtype=ms.float32), name="weight") self.bias = Parameter(Tensor(np.ones([1000]), dtype=ms.float32), name="bias") diff --git a/tests/ut/python/parallel/test_auto_parallel_transformer.py b/tests/ut/python/parallel/test_auto_parallel_transformer.py index 196da302b2..6e7751cf0a 100644 --- a/tests/ut/python/parallel/test_auto_parallel_transformer.py +++ b/tests/ut/python/parallel/test_auto_parallel_transformer.py @@ -52,7 +52,7 @@ class CustomDense(nn.Cell): self.weight = Parameter(Tensor(np.ones([row, column]).astype(np.float32) * 0.01), "w", requires_grad=True) self.bias = Parameter(Tensor(np.zeros([row, column]).astype(np.float32)), "b", requires_grad=True) self.matmul1 = P.MatMul() - self.add2 = P.TensorAdd() + self.add2 = P.Add() self.activation3 = nn.ReLU() def construct(self, x): diff --git a/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py b/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py index 5dd825b175..4389a39393 100644 --- a/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py +++ b/tests/ut/python/parallel/test_auto_parallel_triangle_overwrite.py @@ -51,7 +51,7 @@ 
def test_triangle_strategy_consistency(): self.ba1 = P.BiasAdd() self.weight = Parameter(Tensor(np.ones([128, 1000]), dtype=ms.float32), name="weight") self.bias = Parameter(Tensor(np.ones([1000]), dtype=ms.float32), name="bias") - self.add = P.TensorAdd().shard(((1, 8), (1, 8))) + self.add = P.Add().shard(((1, 8), (1, 8))) self.relu = P.ReLU() def construct(self, x): diff --git a/tests/ut/python/parallel/test_auto_parallel_two_bn.py b/tests/ut/python/parallel/test_auto_parallel_two_bn.py index 7a9702444d..a610ad2f39 100644 --- a/tests/ut/python/parallel/test_auto_parallel_two_bn.py +++ b/tests/ut/python/parallel/test_auto_parallel_two_bn.py @@ -58,7 +58,7 @@ def test_two_bn(): self.block1 = get_block() self.block2 = get_block() self.relu = P.ReLU() - self.add = P.TensorAdd() + self.add = P.Add() self.bias = Tensor(np.ones([64, 64]), dtype=ms.float32) def construct(self, x): diff --git a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py index a92b9ee2ba..a1004db118 100644 --- a/tests/ut/python/parallel/test_batch_parallel_tensoradd.py +++ b/tests/ut/python/parallel/test_batch_parallel_tensoradd.py @@ -52,7 +52,7 @@ def test_matmul_add(): def __init__(self): super().__init__() self.matmul = P.MatMul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y, b): out = self.matmul(x, y) diff --git a/tests/ut/python/parallel/test_linear.py b/tests/ut/python/parallel/test_linear.py index d368b5a033..ce2f1a142f 100644 --- a/tests/ut/python/parallel/test_linear.py +++ b/tests/ut/python/parallel/test_linear.py @@ -51,7 +51,7 @@ def test_linear(): def __init__(self, strategy0, strategy1, strategy2): super().__init__() self.fc_nobias = P.MatMul(transpose_b=True).shard(strategy0) - self.add = P.TensorAdd().shard(strategy1) + self.add = P.Add().shard(strategy1) self.gelu = P.Gelu().shard(strategy2) def construct(self, x, y, bias): diff --git a/tests/ut/python/parallel/test_matmul_tensor.py b/tests/ut/python/parallel/test_matmul_tensor.py index 12924a7275..e052ed05c0 100644 --- a/tests/ut/python/parallel/test_matmul_tensor.py +++ b/tests/ut/python/parallel/test_matmul_tensor.py @@ -145,7 +145,7 @@ def test_matmul_add_tensor(): def __init__(self, strategy1, strategy2): super().__init__() self.matmul = P.MatMul().shard(strategy1) - self.add = P.TensorAdd().shard(strategy2) + self.add = P.Add().shard(strategy2) self.b = Tensor(0.9, ms.float32) def construct(self, x, y): diff --git a/tests/ut/python/parallel/test_mul_div_bn.py b/tests/ut/python/parallel/test_mul_div_bn.py index 9254ae9a18..9c0f6360cf 100644 --- a/tests/ut/python/parallel/test_mul_div_bn.py +++ b/tests/ut/python/parallel/test_mul_div_bn.py @@ -26,7 +26,7 @@ class TwoInputBpropOperator(Cell): def __init__(self): super().__init__() self.op = P.Mul() - self.bp = P.TensorAdd() + self.bp = P.Add() def construct(self, x, y): return self.op(x, y) diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py index 8ba68e1fe5..81d7dcf701 100644 --- a/tests/ut/python/parallel/test_one_hot_net.py +++ b/tests/ut/python/parallel/test_one_hot_net.py @@ -111,7 +111,7 @@ class SemiAutoOneHotNet(Cell): self.exp3.shard(strategy=strategy.twod_strategy) self.mul_const = P.Mul() self.mul_const.shard(strategy=strategy.scalar_twod_strategy) - self.mul_const2 = P.TensorAdd() + self.mul_const2 = P.Add() self.mul_const2.shard(strategy=strategy.scalar_twod_strategy) self.mul_const3 = P.Sub() 
self.mul_const3.shard(strategy=strategy.twod_scalar_strategy) @@ -123,7 +123,7 @@ class SemiAutoOneHotNet(Cell): self.mul.shard(strategy=strategy.twod_twod_strategy) self.mul2 = P.Mul() self.mul2.shard(strategy=strategy.twod_twod_strategy) - self.mul3 = P.TensorAdd() + self.mul3 = P.Add() self.mul3.shard(strategy=strategy.twod_twod_strategy) self.mul4 = P.Sub() self.mul4.shard(strategy=strategy.twod_twodbc_strategy) @@ -135,7 +135,7 @@ class SemiAutoOneHotNet(Cell): self.mul7.shard(strategy=strategy.twod_scalar_strategy) self.mul8 = P.RealDiv() self.mul8.shard(strategy=strategy.scalar_scalar_strategy) - self.mul9 = P.TensorAdd() + self.mul9 = P.Add() self.mul9.shard(strategy=strategy.twod_scalar_strategy) self.reduce_max = P.ReduceMax(keep_dims=True) diff --git a/tests/ut/python/parallel/test_operator_model_parallel.py b/tests/ut/python/parallel/test_operator_model_parallel.py index 45946f7e19..912be93406 100644 --- a/tests/ut/python/parallel/test_operator_model_parallel.py +++ b/tests/ut/python/parallel/test_operator_model_parallel.py @@ -28,7 +28,7 @@ from mindspore.nn.layer.pooling import MaxPool2d from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim.momentum import Momentum from mindspore.ops import operations as P -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from mindspore.train import Model from mindspore.context import ParallelMode from tests.dataset_mock import MindData @@ -70,7 +70,7 @@ class DenseWrap(Cell): 'zeros', [output_channels]), name="bias") self.matmul = P.MatMul(transpose_b=True).shard(matmul_strategy) - self.bias_add = P.TensorAdd().shard(shard) + self.bias_add = P.Add().shard(shard) def construct(self, x): if self.has_bias: @@ -200,7 +200,7 @@ class ResidualBlock(Cell): self.relu1 = P.ReLU().shard(strategy_no_weight) self.relu2 = P.ReLU().shard(strategy_no_weight) self.relu3 = P.ReLU().shard(strategy_no_weight) - self.add = TensorAdd().shard(strategy_add) + self.add = Add().shard(strategy_add) def construct(self, x): identity = x @@ -249,7 +249,7 @@ class ResidualBlockWithDown(Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride) self.bn_down_sample = bn_with_initialize(out_channels) - self.add = TensorAdd().shard(strategy_add) + self.add = Add().shard(strategy_add) def construct(self, x): identity = x diff --git a/tests/ut/python/parallel/test_reduce_method_info.py b/tests/ut/python/parallel/test_reduce_method_info.py index 6e369ff134..14021c0a3c 100644 --- a/tests/ut/python/parallel/test_reduce_method_info.py +++ b/tests/ut/python/parallel/test_reduce_method_info.py @@ -565,7 +565,7 @@ def test_max_empty_tuple(): super().__init__() self.mul = P.Mul().shard(strategy1) self.reduce_max = P.ReduceMax(keep_dims=False).shard(strategy2) - self.add = P.TensorAdd().shard(strategy3) + self.add = P.Add().shard(strategy3) def construct(self, x, y, b): out = self.mul(x, y) diff --git a/tests/ut/python/parallel/test_repeated_calc.py b/tests/ut/python/parallel/test_repeated_calc.py index 8e1d7f3c48..56e0ae58d9 100644 --- a/tests/ut/python/parallel/test_repeated_calc.py +++ b/tests/ut/python/parallel/test_repeated_calc.py @@ -58,7 +58,7 @@ def test_tensoradd_reshape_matmul(): class Net(nn.Cell): def __init__(self, strategy1, strategy2): super().__init__() - self.add = P.TensorAdd().shard(strategy1) + self.add = P.Add().shard(strategy1) self.reshape = P.Reshape() self.matmul = P.MatMul().shard(strategy2) diff --git a/tests/ut/python/parallel/test_reshape.py 
b/tests/ut/python/parallel/test_reshape.py index ad83e93cb1..a98315a4b3 100644 --- a/tests/ut/python/parallel/test_reshape.py +++ b/tests/ut/python/parallel/test_reshape.py @@ -292,7 +292,7 @@ class ReshapeNet6(nn.Cell): self.matmul1_2 = P.MatMul().shard(strategy0) self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight") self.matmul2 = P.MatMul().shard(strategy0) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): x = self.reshape(x, (256, 25088)) diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py index 61127d1075..5577bd1395 100644 --- a/tests/ut/python/parameter_feature/test_parameter.py +++ b/tests/ut/python/parameter_feature/test_parameter.py @@ -130,7 +130,7 @@ def test_prim_vararg_kwonlyarg(): super(SecondNet, self).__init__() self.addN = P.AddN() self.max = P.Maximum() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y, *args, z=0, r=1): c = self.max(args[0], args[1]) @@ -195,7 +195,7 @@ def test_net_variable_and_weights(): super(SecondNet, self).__init__() self.addN = P.AddN() self.max = P.Maximum() - self.add = P.TensorAdd() + self.add = P.Add() self.weight = Parameter(Tensor(np.ones((2, 3, 4), np.float32)), "w2", requires_grad=True) def construct(self, a, b, *args): diff --git a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py index a775b82236..8d6a1e9ea1 100644 --- a/tests/ut/python/pipeline/infer/test_scalar_add_grad.py +++ b/tests/ut/python/pipeline/infer/test_scalar_add_grad.py @@ -20,7 +20,7 @@ from mindspore.common.tensor import Tensor from mindspore.nn import Cell from mindspore.nn import ReLU from mindspore.ops import composite as C -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add context.set_context(mode=context.GRAPH_MODE) grad = C.GradOperation(get_all=True, sens_param=True) @@ -32,7 +32,7 @@ class TensorAddNetMe(Cell): def __init__(self): super(TensorAddNetMe, self).__init__() self.relu = ReLU() - self.add = TensorAdd() + self.add = Add() def construct(self, inputA, inputB): inputA = self.relu(inputA) diff --git a/tests/ut/python/pipeline/parse/test_create_obj.py b/tests/ut/python/pipeline/parse/test_create_obj.py index 11e7807694..201aa0ae2d 100644 --- a/tests/ut/python/pipeline/parse/test_create_obj.py +++ b/tests/ut/python/pipeline/parse/test_create_obj.py @@ -71,11 +71,11 @@ class Net1(nn.Cell): def __init__(self): super(Net1, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() @ms_function def construct(self, x, y): - add = P.TensorAdd() + add = P.Add() result = add(x, y) return result diff --git a/tests/ut/python/pipeline/parse/test_structure_output.py b/tests/ut/python/pipeline/parse/test_structure_output.py index 92c92deb1a..24499cff30 100644 --- a/tests/ut/python/pipeline/parse/test_structure_output.py +++ b/tests/ut/python/pipeline/parse/test_structure_output.py @@ -128,7 +128,7 @@ def test_tuple_tuple_0(): class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() def construct(self, x, y): @@ -150,7 +150,7 @@ def test_tuple_tuple_1(): class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() def construct(self, x, y): @@ -170,7 +170,7 @@ def test_tuple_tuple_2(): class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = 
P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.relu = P.ReLU() self.depend = depend @@ -196,7 +196,7 @@ def test_tuple_tuple_3(): class Net(Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.relu = P.ReLU() self.depend = depend diff --git a/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py b/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py index e530aa201a..d1497a1ce4 100644 --- a/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py +++ b/tests/ut/python/pynative_mode/ge/ops/test_tensor_add.py @@ -18,7 +18,7 @@ test pooling api import numpy as np from mindspore import Tensor -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add from ....ut_filter import non_graph_engine @@ -27,7 +27,7 @@ def test_tensor_add(): x = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32)) y = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32)) - tensor_add = TensorAdd() + tensor_add = Add() z = tensor_add(x, y) assert np.all(z.asnumpy() - (x.asnumpy() + y.asnumpy()) < 0.0001) diff --git a/tests/ut/python/pynative_mode/ops/test_hypermap.py b/tests/ut/python/pynative_mode/ops/test_hypermap.py index db391301ba..7105634e49 100644 --- a/tests/ut/python/pynative_mode/ops/test_hypermap.py +++ b/tests/ut/python/pynative_mode/ops/test_hypermap.py @@ -27,7 +27,7 @@ from ...ut_filter import non_graph_engine # W0613: unused-argument -tensor_add = P.TensorAdd() +tensor_add = P.Add() scala_add = Primitive('scalar_add') add = C.MultitypeFuncGraph('add') diff --git a/tests/ut/python/pynative_mode/ops/test_multitype.py b/tests/ut/python/pynative_mode/ops/test_multitype.py index 67da12b088..8b3f491f4d 100644 --- a/tests/ut/python/pynative_mode/ops/test_multitype.py +++ b/tests/ut/python/pynative_mode/ops/test_multitype.py @@ -24,7 +24,7 @@ from mindspore.ops import operations as P from mindspore import dtype as mstype from ...ut_filter import non_graph_engine -tensor_add = P.TensorAdd() +tensor_add = P.Add() op_add = P.AddN() scala_add = Primitive('scalar_add') add = C.MultitypeFuncGraph('add') diff --git a/tests/ut/python/pynative_mode/test_backend.py b/tests/ut/python/pynative_mode/test_backend.py index b0f660fbaf..537c58d07a 100644 --- a/tests/ut/python/pynative_mode/test_backend.py +++ b/tests/ut/python/pynative_mode/test_backend.py @@ -34,7 +34,7 @@ class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.x = Parameter(initializer('normal', [1, 3, 3, 4]), name='x') self.y = Parameter(initializer('normal', [1, 3, 3, 4]), name='y') diff --git a/tests/ut/python/pynative_mode/test_cont_cases.py b/tests/ut/python/pynative_mode/test_cont_cases.py index 5c7350ed56..a3d295e9ad 100644 --- a/tests/ut/python/pynative_mode/test_cont_cases.py +++ b/tests/ut/python/pynative_mode/test_cont_cases.py @@ -651,7 +651,7 @@ def test_if_by_if_forward(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -687,7 +687,7 @@ def test_if_by_if_forward_control_tuple_switch(): class Branch3Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -702,7 +702,7 @@ def test_if_by_if_forward_control_tuple_switch(): class Branch2Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() 
self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -718,7 +718,7 @@ def test_if_by_if_forward_control_tuple_switch(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -747,7 +747,7 @@ def test_if_by_if_forward_control_inside_net(): class Branch3Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -764,7 +764,7 @@ def test_if_by_if_forward_control_inside_net(): class Branch2Net(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -780,7 +780,7 @@ def test_if_by_if_forward_control_inside_net(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -807,7 +807,7 @@ def test_if_by_if_forward_use_namespace(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -815,7 +815,7 @@ def test_if_by_if_forward_use_namespace(): @ms_function def construct(self, a, b, x): if a < b: - a = P.TensorAdd()(a, b) + a = P.Add()(a, b) else: a = P.Sub()(a, b) if a == x: @@ -823,9 +823,9 @@ def test_if_by_if_forward_use_namespace(): else: a = P.RealDiv()(a, b) if b == x: - b = P.TensorAdd()(a, b) + b = P.Add()(a, b) else: - b = P.TensorAdd()(a, x) + b = P.Add()(a, x) a = a * b out = a + b + x return out @@ -842,14 +842,14 @@ def test_if_by_if_forward_use_global_op(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @ms_function def construct(self, a, b, x): - add = P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() @@ -881,7 +881,7 @@ def test_for_with_if_by_if_forward(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() @ms_function @@ -907,7 +907,7 @@ def test_for_with_if_by_if_forward_namespace(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @@ -916,7 +916,7 @@ def test_for_with_if_by_if_forward_namespace(): def construct(self, a, b, x): for _ in range(0, 6): if a < b: - a = P.TensorAdd()(a, b) + a = P.Add()(a, b) else: b = P.Sub()(b, x) a = a * b @@ -935,14 +935,14 @@ def test_if_by_if_forward_const_branch_inner(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @ms_function def construct(self, a, b, x): - add = P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() @@ -974,14 +974,14 @@ def test_if_by_if_forward_all_const_branch(): class MyIfByIfNet(nn.Cell): def __init__(self): super().__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.sub = P.Sub() self.mul = P.Mul() self.div = P.RealDiv() @ms_function def construct(self, a, b, x): - add = P.TensorAdd() + add = P.Add() sub = P.Sub() mul = P.Mul() div = P.RealDiv() diff --git a/tests/ut/python/pynative_mode/test_implicit_conversion.py 
b/tests/ut/python/pynative_mode/test_implicit_conversion.py index aab0961f56..33a02d71fb 100644 --- a/tests/ut/python/pynative_mode/test_implicit_conversion.py +++ b/tests/ut/python/pynative_mode/test_implicit_conversion.py @@ -111,7 +111,7 @@ def test_float_tensor_and_str_add(): y = "ok" with pytest.raises(TypeError) as er: ret = x + y - assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: str" in str(er.value) + assert "For 'Add', the 1th input is a not support implicit conversion type: str" in str(er.value) def test_float_tensor_and_tuple_add(): @@ -119,7 +119,7 @@ def test_float_tensor_and_tuple_add(): y = (1, 2, 3) with pytest.raises(TypeError) as er: ret = x + y - assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: tuple" in str(er.value) + assert "For 'Add', the 1th input is a not support implicit conversion type: tuple" in str(er.value) def test_float_tensor_and_list_add(): @@ -127,7 +127,7 @@ def test_float_tensor_and_list_add(): y = [1, 2, 3] with pytest.raises(TypeError) as er: ret = x + y - assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: list" in str(er.value) + assert "For 'Add', the 1th input is a not support implicit conversion type: list" in str(er.value) def test_float_tensor_and_bool_tensors_add_grad(): diff --git a/tests/ut/python/pynative_mode/test_staging.py b/tests/ut/python/pynative_mode/test_staging.py index 9c5c044e10..7b53d53be8 100644 --- a/tests/ut/python/pynative_mode/test_staging.py +++ b/tests/ut/python/pynative_mode/test_staging.py @@ -83,7 +83,7 @@ class TensorAddMulNet(nn.Cell): def __init__(self): super(TensorAddMulNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() @ms_function def add_stage0(self, x, y): @@ -110,7 +110,7 @@ class TensorAddNet(nn.Cell): def __init__(self): super(TensorAddNet, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() @ms_function def compute(self, x, y): diff --git a/tests/ut/python/pynative_mode/test_tuple_parameter.py b/tests/ut/python/pynative_mode/test_tuple_parameter.py index a14e8470da..169ec67c21 100644 --- a/tests/ut/python/pynative_mode/test_tuple_parameter.py +++ b/tests/ut/python/pynative_mode/test_tuple_parameter.py @@ -29,7 +29,7 @@ class Block2(nn.Cell): def __init__(self): super(Block2, self).__init__() self.mul = P.Mul() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): z1 = self.mul(x, y) @@ -50,7 +50,7 @@ class Net1(nn.Cell): class Net2(nn.Cell): def __init__(self): super(Net2, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.block = Block2() def construct(self, x, y): diff --git a/tests/ut/python/train/summary/test_summary.py b/tests/ut/python/train/summary/test_summary.py index cf60780413..535bd6ffff 100644 --- a/tests/ut/python/train/summary/test_summary.py +++ b/tests/ut/python/train/summary/test_summary.py @@ -99,7 +99,7 @@ class SummaryDemo(nn.Cell): super(SummaryDemo, self).__init__() self.s = P.ScalarSummary() self.histogram_summary = P.HistogramSummary() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): self.s("x1", x) diff --git a/tests/ut/python/train/summary/test_summary_collector.py b/tests/ut/python/train/summary/test_summary_collector.py index 0ca19b3fa5..02644ee421 100644 --- a/tests/ut/python/train/summary/test_summary_collector.py +++ b/tests/ut/python/train/summary/test_summary_collector.py @@ -30,7 +30,7 @@ from mindspore.train.summary.enums import ModeEnum, PluginEnum from mindspore.train.summary 
import SummaryRecord from mindspore.nn import Cell from mindspore.nn.optim.optimizer import Optimizer -from mindspore.ops.operations import TensorAdd +from mindspore.ops.operations import Add _VALUE_CACHE = list() @@ -58,7 +58,7 @@ class CustomNet(Cell): """Define custom network.""" def __init__(self): super(CustomNet, self).__init__() - self.add = TensorAdd + self.add = Add self.optimizer = Optimizer(learning_rate=1, parameters=[Parameter(Tensor(1), 'weight')]) def construct(self, data): @@ -356,7 +356,7 @@ class TestSummaryCollector: """Define net.""" def __init__(self): super(Net, self).__init__() - self.add = TensorAdd() + self.add = Add() def construct(self, data): return data diff --git a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py index 6a7577bdf1..7beb13dd81 100644 --- a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py +++ b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py @@ -43,7 +43,7 @@ class SummaryNet(nn.Cell): self.data = data self.summary_fn = getattr(P, summary_type)() self.one = Tensor(np.array([1]).astype(np.float32)) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self): self.summary_fn(self.tag, self.data) diff --git a/tests/ut/python/train/summary/test_tensor_summary.py b/tests/ut/python/train/summary/test_tensor_summary.py index 2bbd7fa56b..cbf27873ca 100644 --- a/tests/ut/python/train/summary/test_tensor_summary.py +++ b/tests/ut/python/train/summary/test_tensor_summary.py @@ -113,7 +113,7 @@ class SummaryDemo(nn.Cell): def __init__(self,): super(SummaryDemo, self).__init__() self.s = P.TensorSummary() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): self.s("x1", x) diff --git a/tests/ut/python/transform/test_transform.py b/tests/ut/python/transform/test_transform.py index e183a50ed0..0a54aaf7ff 100644 --- a/tests/ut/python/transform/test_transform.py +++ b/tests/ut/python/transform/test_transform.py @@ -75,7 +75,7 @@ class ResidualBlock(nn.Cell): self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) self.bn_down_sample = nn.BatchNorm2d(out_channels) - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x): identity = x diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py index c107d8fd62..427a035ce4 100644 --- a/tests/ut/python/utils/test_initializer.py +++ b/tests/ut/python/utils/test_initializer.py @@ -235,7 +235,7 @@ def test_conv2d_abnormal_kernel_truncated_normal(): class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() self.t1 = Parameter(init.initializer('uniform', [5, 4], ms.float32), name="w1") self.t2 = Parameter(init.initializer(init.TruncatedNormal(), [5, 4], ms.float32), name="w2") diff --git a/tests/ut/python/utils/test_serialize.py b/tests/ut/python/utils/test_serialize.py index e7c3d6b85b..bb8a21fd9b 100644 --- a/tests/ut/python/utils/test_serialize.py +++ b/tests/ut/python/utils/test_serialize.py @@ -75,7 +75,7 @@ def test_save_graph(): class Net1(nn.Cell): def __init__(self): super(Net1, self).__init__() - self.add = P.TensorAdd() + self.add = P.Add() def construct(self, x, y): z = self.add(x, y) diff --git a/tests/vm_impl/math_ops_vm_impl.py b/tests/vm_impl/math_ops_vm_impl.py index 76ccebbb8e..ad6f79d7a6 100644 --- a/tests/vm_impl/math_ops_vm_impl.py +++ b/tests/vm_impl/math_ops_vm_impl.py @@ -26,7 +26,7 @@ from 
.vm_interface import vm # pylint: disable=unused-argument -@vm_impl_getters.register(P.TensorAdd) +@vm_impl_getters.register(P.Add) def vm_impl_tensor_add(self): -    """Generate vm_impl function for TensorAdd.""" +    """Generate vm_impl function for Add."""
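
For downstream users, the migration this patch performs is mechanical: every `P.TensorAdd()` call site becomes `P.Add()`, and `from mindspore.ops.operations import TensorAdd` becomes the same import of `Add`. A minimal sketch of a migrated cell, mirroring the updated tests/ut/python/exec/test_tensor_add.py above (the shapes and dtype here are illustrative, not taken from the patch):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.add = P.Add()  # was: P.TensorAdd()

        def construct(self, x, y):
            # Element-wise addition with NumPy-style broadcasting.
            return self.add(x, y)

    net = Net()
    x = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32))
    y = Tensor(np.ones([1, 3, 4, 4]).astype(np.float32))
    out = net(x, y)  # element-wise sum, shape (1, 3, 4, 4)

Sharded usage changes the same way, as the parallel tests above show: `P.TensorAdd().shard(strategy)` becomes `P.Add().shard(strategy)` with the strategy tuple unchanged.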